blob: da8966ccf5c1b08c94db4affbcde5c9ffbf51f6e [file] [log] [blame]
David Gibson10b46522013-03-12 00:31:06 +00001/*
2 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
3 *
4 * Copyright (c) 2003-2007 Jocelyn Mayer
5 * Copyright (c) 2013 David Gibson, IBM Corporation
6 *
7 * This library is free software; you can redistribute it and/or
8 * modify it under the terms of the GNU Lesser General Public
9 * License as published by the Free Software Foundation; either
10 * version 2 of the License, or (at your option) any later version.
11 *
12 * This library is distributed in the hope that it will be useful,
13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
15 * Lesser General Public License for more details.
16 *
17 * You should have received a copy of the GNU Lesser General Public
18 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
19 */
Peter Maydell0d755902016-01-26 18:16:58 +000020#include "qemu/osdep.h"
David Gibson10b46522013-03-12 00:31:06 +000021#include "cpu.h"
Paolo Bonzini63c91552016-03-15 13:18:37 +010022#include "exec/exec-all.h"
Richard Henderson2ef61752014-04-07 22:31:41 -070023#include "exec/helper-proto.h"
David Gibsoncd6a9bb2016-01-27 11:52:57 +110024#include "qemu/error-report.h"
Markus Armbrusterfad866d2019-04-17 21:17:58 +020025#include "qemu/qemu-print.h"
Vincent Palatinb3946622017-01-10 11:59:55 +010026#include "sysemu/hw_accel.h"
David Gibson10b46522013-03-12 00:31:06 +000027#include "kvm_ppc.h"
28#include "mmu-hash64.h"
Paolo Bonzini508127e2016-01-07 16:55:28 +030029#include "exec/log.h"
David Gibson7222b942017-02-27 16:03:41 +110030#include "hw/hw.h"
Suraj Jitindar Singhb2899492017-03-01 17:54:38 +110031#include "mmu-book3s-v3.h"
David Gibson10b46522013-03-12 00:31:06 +000032
David Gibsond75cbae2019-03-21 22:32:53 +110033/* #define DEBUG_SLB */
David Gibson10b46522013-03-12 00:31:06 +000034
35#ifdef DEBUG_SLB
Paolo Bonzini48880da2015-11-13 13:34:23 +010036# define LOG_SLB(...) qemu_log_mask(CPU_LOG_MMU, __VA_ARGS__)
David Gibson10b46522013-03-12 00:31:06 +000037#else
38# define LOG_SLB(...) do { } while (0)
39#endif
40
41/*
42 * SLB handling
43 */
44
/*
 * Find the valid SLB entry (if any) that translates effective address
 * @eaddr.  Returns a pointer into cpu->env.slb[], or NULL when no
 * entry matches.  Matches both 256M and 1T segment entries: the ESID
 * comparison includes the B (segment size) field of the VSID so a
 * stale entry of the other size cannot match.
 */
static ppc_slb_t *slb_lookup(PowerPCCPU *cpu, target_ulong eaddr)
{
    CPUPPCState *env = &cpu->env;
    uint64_t esid_256M, esid_1T;
    int n;

    LOG_SLB("%s: eaddr " TARGET_FMT_lx "\n", __func__, eaddr);

    /* Candidate ESIDs for each segment size, with the valid bit set */
    esid_256M = (eaddr & SEGMENT_MASK_256M) | SLB_ESID_V;
    esid_1T = (eaddr & SEGMENT_MASK_1T) | SLB_ESID_V;

    for (n = 0; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        LOG_SLB("%s: slot %d %016" PRIx64 " %016"
                PRIx64 "\n", __func__, n, slb->esid, slb->vsid);
        /*
         * We check for 1T matches on all MMUs here - if the MMU
         * doesn't have 1T segment support, we will have prevented 1T
         * entries from being inserted in the slbmte code.
         */
        if (((slb->esid == esid_256M) &&
             ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_256M))
            || ((slb->esid == esid_1T) &&
                ((slb->vsid & SLB_VSID_B) == SLB_VSID_B_1T))) {
            return slb;
        }
    }

    return NULL;
}
76
Markus Armbrusterfad866d2019-04-17 21:17:58 +020077void dump_slb(PowerPCCPU *cpu)
David Gibson10b46522013-03-12 00:31:06 +000078{
David Gibson7ef23062016-01-14 15:33:27 +110079 CPUPPCState *env = &cpu->env;
David Gibson10b46522013-03-12 00:31:06 +000080 int i;
81 uint64_t slbe, slbv;
82
David Gibson7ef23062016-01-14 15:33:27 +110083 cpu_synchronize_state(CPU(cpu));
David Gibson10b46522013-03-12 00:31:06 +000084
Markus Armbrusterfad866d2019-04-17 21:17:58 +020085 qemu_printf("SLB\tESID\t\t\tVSID\n");
David Gibson67d7d662018-03-29 18:29:38 +110086 for (i = 0; i < cpu->hash64_opts->slb_size; i++) {
David Gibson10b46522013-03-12 00:31:06 +000087 slbe = env->slb[i].esid;
88 slbv = env->slb[i].vsid;
89 if (slbe == 0 && slbv == 0) {
90 continue;
91 }
Markus Armbrusterfad866d2019-04-17 21:17:58 +020092 qemu_printf("%d\t0x%016" PRIx64 "\t0x%016" PRIx64 "\n",
David Gibson10b46522013-03-12 00:31:06 +000093 i, slbe, slbv);
94 }
95}
96
/*
 * slbia instruction: invalidate all SLB entries except slot 0, by
 * clearing their valid bits.  Any entry actually invalidated forces a
 * full local TLB flush, since QEMU has no ranged flush primitive.
 */
void helper_slbia(CPUPPCState *env)
{
    PowerPCCPU *cpu = env_archcpu(env);
    int n;

    /* XXX: Warning: slbia never invalidates the first segment */
    for (n = 1; n < cpu->hash64_opts->slb_size; n++) {
        ppc_slb_t *slb = &env->slb[n];

        if (slb->esid & SLB_ESID_V) {
            slb->esid &= ~SLB_ESID_V;
            /*
             * XXX: given the fact that segment size is 256 MB or 1TB,
             * and we still don't have a tlb_flush_mask(env, n, mask)
             * in QEMU, we just invalidate all TLBs
             */
            env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        }
    }
}
117
/*
 * Common implementation for slbie/slbieg: invalidate the SLB entry
 * translating @addr (if any) and request a TLB flush — local for
 * slbie (@global == false), global for slbieg.
 *
 * NOTE(review): the double-underscore name is a reserved identifier
 * in C; a rename (e.g. do_helper_slbie) would be cleaner but touches
 * the callers below.
 */
static void __helper_slbie(CPUPPCState *env, target_ulong addr,
                           target_ulong global)
{
    PowerPCCPU *cpu = env_archcpu(env);
    ppc_slb_t *slb;

    slb = slb_lookup(cpu, addr);
    if (!slb) {
        return;
    }

    if (slb->esid & SLB_ESID_V) {
        slb->esid &= ~SLB_ESID_V;

        /*
         * XXX: given the fact that segment size is 256 MB or 1TB,
         * and we still don't have a tlb_flush_mask(env, n, mask)
         * in QEMU, we just invalidate all TLBs
         */
        env->tlb_need_flush |=
            (global == false ? TLB_NEED_LOCAL_FLUSH : TLB_NEED_GLOBAL_FLUSH);
    }
}
141
/* slbie: invalidate the SLB entry for @addr, local TLB flush only */
void helper_slbie(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, false);
}
146
/* slbieg: invalidate the SLB entry for @addr, with a global TLB flush */
void helper_slbieg(CPUPPCState *env, target_ulong addr)
{
    __helper_slbie(env, addr, true);
}
151
David Gibsonbcd81232016-01-27 11:07:29 +1100152int ppc_store_slb(PowerPCCPU *cpu, target_ulong slot,
153 target_ulong esid, target_ulong vsid)
David Gibson10b46522013-03-12 00:31:06 +0000154{
David Gibson7ef23062016-01-14 15:33:27 +1100155 CPUPPCState *env = &cpu->env;
David Gibson10b46522013-03-12 00:31:06 +0000156 ppc_slb_t *slb = &env->slb[slot];
David Gibsonb07c59f2018-03-23 13:31:52 +1100157 const PPCHash64SegmentPageSizes *sps = NULL;
David Gibsoncd6a9bb2016-01-27 11:52:57 +1100158 int i;
David Gibson10b46522013-03-12 00:31:06 +0000159
David Gibson67d7d662018-03-29 18:29:38 +1100160 if (slot >= cpu->hash64_opts->slb_size) {
David Gibsonbcd81232016-01-27 11:07:29 +1100161 return -1; /* Bad slot number */
David Gibson10b46522013-03-12 00:31:06 +0000162 }
David Gibsonbcd81232016-01-27 11:07:29 +1100163 if (esid & ~(SLB_ESID_ESID | SLB_ESID_V)) {
164 return -1; /* Reserved bits set */
165 }
166 if (vsid & (SLB_VSID_B & ~SLB_VSID_B_1T)) {
David Gibson10b46522013-03-12 00:31:06 +0000167 return -1; /* Bad segment size */
168 }
David Gibson58969ee2018-03-23 14:11:07 +1100169 if ((vsid & SLB_VSID_B) && !(ppc_hash64_has(cpu, PPC_HASH64_1TSEG))) {
David Gibson10b46522013-03-12 00:31:06 +0000170 return -1; /* 1T segment on MMU that doesn't support it */
171 }
172
David Gibsoncd6a9bb2016-01-27 11:52:57 +1100173 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
David Gibsonb07c59f2018-03-23 13:31:52 +1100174 const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
David Gibsoncd6a9bb2016-01-27 11:52:57 +1100175
176 if (!sps1->page_shift) {
177 break;
178 }
179
180 if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
181 sps = sps1;
182 break;
183 }
184 }
185
186 if (!sps) {
187 error_report("Bad page size encoding in SLB store: slot "TARGET_FMT_lu
188 " esid 0x"TARGET_FMT_lx" vsid 0x"TARGET_FMT_lx,
189 slot, esid, vsid);
190 return -1;
191 }
192
David Gibsonbcd81232016-01-27 11:07:29 +1100193 slb->esid = esid;
194 slb->vsid = vsid;
David Gibsoncd6a9bb2016-01-27 11:52:57 +1100195 slb->sps = sps;
David Gibson10b46522013-03-12 00:31:06 +0000196
Suraj Jitindar Singh76134d42017-01-13 17:28:22 +1100197 LOG_SLB("%s: " TARGET_FMT_lu " " TARGET_FMT_lx " - " TARGET_FMT_lx
198 " => %016" PRIx64 " %016" PRIx64 "\n", __func__, slot, esid, vsid,
David Gibson10b46522013-03-12 00:31:06 +0000199 slb->esid, slb->vsid);
200
201 return 0;
202}
203
David Gibson7ef23062016-01-14 15:33:27 +1100204static int ppc_load_slb_esid(PowerPCCPU *cpu, target_ulong rb,
David Gibson10b46522013-03-12 00:31:06 +0000205 target_ulong *rt)
206{
David Gibson7ef23062016-01-14 15:33:27 +1100207 CPUPPCState *env = &cpu->env;
David Gibson10b46522013-03-12 00:31:06 +0000208 int slot = rb & 0xfff;
209 ppc_slb_t *slb = &env->slb[slot];
210
David Gibson67d7d662018-03-29 18:29:38 +1100211 if (slot >= cpu->hash64_opts->slb_size) {
David Gibson10b46522013-03-12 00:31:06 +0000212 return -1;
213 }
214
215 *rt = slb->esid;
216 return 0;
217}
218
David Gibson7ef23062016-01-14 15:33:27 +1100219static int ppc_load_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
David Gibson10b46522013-03-12 00:31:06 +0000220 target_ulong *rt)
221{
David Gibson7ef23062016-01-14 15:33:27 +1100222 CPUPPCState *env = &cpu->env;
David Gibson10b46522013-03-12 00:31:06 +0000223 int slot = rb & 0xfff;
224 ppc_slb_t *slb = &env->slb[slot];
225
David Gibson67d7d662018-03-29 18:29:38 +1100226 if (slot >= cpu->hash64_opts->slb_size) {
David Gibson10b46522013-03-12 00:31:06 +0000227 return -1;
228 }
229
230 *rt = slb->vsid;
231 return 0;
232}
233
Benjamin Herrenschmidtc76c22d2016-06-07 12:50:27 +1000234static int ppc_find_slb_vsid(PowerPCCPU *cpu, target_ulong rb,
235 target_ulong *rt)
236{
237 CPUPPCState *env = &cpu->env;
238 ppc_slb_t *slb;
239
240 if (!msr_is_64bit(env, env->msr)) {
241 rb &= 0xffffffff;
242 }
243 slb = slb_lookup(cpu, rb);
244 if (slb == NULL) {
245 *rt = (target_ulong)-1ul;
246 } else {
247 *rt = slb->vsid;
248 }
249 return 0;
250}
251
/*
 * slbmte instruction: rb[0:11] selects the slot, the rest of rb is the
 * ESID word, rs is the VSID word.  An invalid encoding raises a
 * program interrupt (POWERPC_EXCP_INVAL).
 */
void helper_store_slb(CPUPPCState *env, target_ulong rb, target_ulong rs)
{
    PowerPCCPU *cpu = env_archcpu(env);

    if (ppc_store_slb(cpu, rb & 0xfff, rb & ~0xfffULL, rs) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
}
261
/*
 * slbmfee instruction: return the ESID word of the SLB slot selected
 * by rb; raises a program interrupt for an out-of-range slot.
 */
target_ulong helper_load_slb_esid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_esid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
273
/*
 * slbfee-style instruction helper: return the VSID of the SLB entry
 * translating rb, or all-ones when none matches.  The exception path
 * is kept for symmetry; ppc_find_slb_vsid() currently always
 * returns 0.
 */
target_ulong helper_find_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_find_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
285
/*
 * slbmfev instruction: return the VSID word of the SLB slot selected
 * by rb; raises a program interrupt for an out-of-range slot.
 */
target_ulong helper_load_slb_vsid(CPUPPCState *env, target_ulong rb)
{
    PowerPCCPU *cpu = env_archcpu(env);
    target_ulong rt = 0;

    if (ppc_load_slb_vsid(cpu, rb, &rt) < 0) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL, GETPC());
    }
    return rt;
}
David Gibson9d7c3f42013-03-12 00:31:07 +0000297
Suraj Jitindar Singh07a68f92017-03-01 18:12:54 +1100298/* Check No-Execute or Guarded Storage */
299static inline int ppc_hash64_pte_noexec_guard(PowerPCCPU *cpu,
300 ppc_hash_pte64_t pte)
301{
302 /* Exec permissions CANNOT take away read or write permissions */
303 return (pte.pte1 & HPTE64_R_N) || (pte.pte1 & HPTE64_R_G) ?
304 PAGE_READ | PAGE_WRITE : PAGE_READ | PAGE_WRITE | PAGE_EXEC;
305}
306
/*
 * Check Basic Storage Protection: derive the page protection from the
 * PTE's PP bits and the SLB entry's key bit (KP in problem state, KS
 * otherwise), per the hash MMU storage protection tables.
 */
static int ppc_hash64_pte_prot(PowerPCCPU *cpu,
                               ppc_slb_t *slb, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    unsigned pp, key;
    /*
     * Some pp bit combinations have undefined behaviour, so default
     * to no access in those cases
     */
    int prot = 0;

    /* Select KP or KS from the SLB entry depending on MSR[PR] */
    key = !!(msr_pr ? (slb->vsid & SLB_VSID_KP)
             : (slb->vsid & SLB_VSID_KS));
    /* PP is split in the PTE: low bits plus PP0 shifted down into bit 2 */
    pp = (pte.pte1 & HPTE64_R_PP) | ((pte.pte1 & HPTE64_R_PP0) >> 61);

    if (key == 0) {
        switch (pp) {
        case 0x0:
        case 0x1:
        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;

        case 0x3:
        case 0x6:
            prot = PAGE_READ | PAGE_EXEC;
            break;
        }
    } else {
        switch (pp) {
        case 0x0:
        case 0x6:
            /* no access */
            break;

        case 0x1:
        case 0x3:
            prot = PAGE_READ | PAGE_EXEC;
            break;

        case 0x2:
            prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;
            break;
        }
    }

    return prot;
}
355
Suraj Jitindar Singha6152b52017-03-01 18:12:52 +1100356/* Check the instruction access permissions specified in the IAMR */
357static int ppc_hash64_iamr_prot(PowerPCCPU *cpu, int key)
358{
359 CPUPPCState *env = &cpu->env;
360 int iamr_bits = (env->spr[SPR_IAMR] >> 2 * (31 - key)) & 0x3;
361
362 /*
363 * An instruction fetch is permitted if the IAMR bit is 0.
364 * If the bit is set, return PAGE_READ | PAGE_WRITE because this bit
365 * can only take away EXEC permissions not READ or WRITE permissions.
366 * If bit is cleared return PAGE_READ | PAGE_WRITE | PAGE_EXEC since
367 * EXEC permissions are allowed.
368 */
369 return (iamr_bits & 0x1) ? PAGE_READ | PAGE_WRITE :
370 PAGE_READ | PAGE_WRITE | PAGE_EXEC;
371}
372
/*
 * Apply Virtual Page Class Key Protection: mask the protections
 * allowed by the AMR (data) and, on MMU 2.07/3.00, the IAMR
 * (instruction) registers for the PTE's key class.  On MMUs without
 * AMR support this is a no-op returning full access.
 */
static int ppc_hash64_amr_prot(PowerPCCPU *cpu, ppc_hash_pte64_t pte)
{
    CPUPPCState *env = &cpu->env;
    int key, amrbits;
    int prot = PAGE_READ | PAGE_WRITE | PAGE_EXEC;

    /* Only recent MMUs implement Virtual Page Class Key Protection */
    if (!ppc_hash64_has(cpu, PPC_HASH64_AMR)) {
        return prot;
    }

    key = HPTE64_R_KEY(pte.pte1);
    /* Two AMR bits per key class, key 0 in the top bits */
    amrbits = (env->spr[SPR_AMR] >> 2 * (31 - key)) & 0x3;

    /*
     * A store is permitted if the AMR bit is 0. Remove write
     * protection if it is set.
     */
    if (amrbits & 0x2) {
        prot &= ~PAGE_WRITE;
    }
    /*
     * A load is permitted if the AMR bit is 0. Remove read
     * protection if it is set.
     */
    if (amrbits & 0x1) {
        prot &= ~PAGE_READ;
    }

    switch (env->mmu_model) {
    /*
     * MMU version 2.07 and later support IAMR
     * Check if the IAMR allows the instruction access - it will return
     * PAGE_EXEC if it doesn't (and thus that bit will be cleared) or 0
     * if it does (and prot will be unchanged indicating execution support).
     */
    case POWERPC_MMU_2_07:
    case POWERPC_MMU_3_00:
        prot &= ppc_hash64_iamr_prot(cpu, key);
        break;
    default:
        break;
    }

    return prot;
}
422
/*
 * Map @n consecutive HPTEs starting at index @ptex for reading.
 * With a virtual hypervisor (pseries under TCG) the vhyp callback
 * owns the table; otherwise the HPT lives in guest memory at
 * ppc_hash64_hpt_base() and is mapped via the address space API.
 * Returns NULL when no HPT is configured.  Must be paired with
 * ppc_hash64_unmap_hptes().
 */
const ppc_hash_pte64_t *ppc_hash64_map_hptes(PowerPCCPU *cpu,
                                             hwaddr ptex, int n)
{
    hwaddr pte_offset = ptex * HASH_PTE_SIZE_64;
    hwaddr base;
    hwaddr plen = n * HASH_PTE_SIZE_64;
    const ppc_hash_pte64_t *hptes;

    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        return vhc->map_hptes(cpu->vhyp, ptex, n);
    }
    base = ppc_hash64_hpt_base(cpu);

    if (!base) {
        return NULL;
    }

    /* is_write = false: the mapping is read-only */
    hptes = address_space_map(CPU(cpu)->as, base + pte_offset, &plen, false,
                              MEMTXATTRS_UNSPECIFIED);
    if (plen < (n * HASH_PTE_SIZE_64)) {
        /* Partial mapping means the HPT straddles a region boundary */
        hw_error("%s: Unable to map all requested HPTEs\n", __func__);
    }
    return hptes;
}
449
/*
 * Release a mapping obtained from ppc_hash64_map_hptes(), with the
 * same @ptex/@n.  Read-only unmap: access_len equals the mapped
 * length, is_write is false.
 */
void ppc_hash64_unmap_hptes(PowerPCCPU *cpu, const ppc_hash_pte64_t *hptes,
                            hwaddr ptex, int n)
{
    if (cpu->vhyp) {
        PPCVirtualHypervisorClass *vhc =
            PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
        vhc->unmap_hptes(cpu->vhyp, hptes, ptex, n);
        return;
    }

    address_space_unmap(CPU(cpu)->as, (void *)hptes, n * HASH_PTE_SIZE_64,
                        false, n * HASH_PTE_SIZE_64);
}
463
/*
 * Decode the page size of a HPTE within segment page-size set @sps.
 * Returns the page shift (e.g. 12, 16, 24) or 0 when the PTE's
 * encoding does not match any size valid for this segment.
 */
static unsigned hpte_page_shift(const PPCHash64SegmentPageSizes *sps,
                                uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        if (sps->page_shift != 12) {
            /* 4kiB page in a non 4kiB segment */
            return 0;
        }
        /* Normal 4kiB page */
        return 12;
    }

    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64PageSize *ps = &sps->enc[i];
        uint64_t mask;

        if (!ps->page_shift) {
            break;      /* end of the (zero-terminated) table */
        }

        if (ps->page_shift == 12) {
            /* L bit is set so this can't be a 4kiB page */
            continue;
        }

        /* The size encoding sits in the low RPN bits of pte1 */
        mask = ((1ULL << ps->page_shift) - 1) & HPTE64_R_RPN;

        if ((pte1 & mask) == ((uint64_t)ps->pte_enc << HPTE64_R_RPN_SHIFT)) {
            return ps->page_shift;
        }
    }

    return 0; /* Bad page size encoding */
}
500
/*
 * Convert an ISA v3.00 (POWER9) HPTE, which stores the segment size
 * (B) field in pte1, back to the pre-3.00 layout with B in pte0, so
 * the rest of this file can use a single format.
 */
static void ppc64_v3_new_to_old_hpte(target_ulong *pte0, target_ulong *pte1)
{
    /* Insert B into pte0 */
    *pte0 = (*pte0 & HPTE64_V_COMMON_BITS) |
            ((*pte1 & HPTE64_R_3_0_SSIZE_MASK) <<
             (HPTE64_V_SSIZE_SHIFT - HPTE64_R_3_0_SSIZE_SHIFT));

    /* Remove B from pte1 */
    *pte1 = *pte1 & ~HPTE64_R_3_0_SSIZE_MASK;
}
511
512
/*
 * Scan one PTE group for a PTE matching @ptem (V/B/H/AVPN) with a
 * page size valid for segment size set @sps.  On success fills *pte
 * and *pshift and returns the PTE index; returns -1 when nothing
 * matches or the HPT cannot be mapped.
 */
static hwaddr ppc_hash64_pteg_search(PowerPCCPU *cpu, hwaddr hash,
                                     const PPCHash64SegmentPageSizes *sps,
                                     target_ulong ptem,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    int i;
    const ppc_hash_pte64_t *pteg;
    target_ulong pte0, pte1;
    target_ulong ptex;

    /* Index of the first PTE in the group selected by @hash */
    ptex = (hash & ppc_hash64_hpt_mask(cpu)) * HPTES_PER_GROUP;
    pteg = ppc_hash64_map_hptes(cpu, ptex, HPTES_PER_GROUP);
    if (!pteg) {
        return -1;
    }
    for (i = 0; i < HPTES_PER_GROUP; i++) {
        pte0 = ppc_hash64_hpte0(cpu, pteg, i);
        /*
         * pte0 contains the valid bit and must be read before pte1,
         * otherwise we might see an old pte1 with a new valid bit and
         * thus an inconsistent hpte value
         */
        smp_rmb();
        pte1 = ppc_hash64_hpte1(cpu, pteg, i);

        /* Convert format if necessary */
        if (cpu->env.mmu_model == POWERPC_MMU_3_00 && !cpu->vhyp) {
            ppc64_v3_new_to_old_hpte(&pte0, &pte1);
        }

        /* This compares V, B, H (secondary) and the AVPN */
        if (HPTE64_V_COMPARE(pte0, ptem)) {
            *pshift = hpte_page_shift(sps, pte0, pte1);
            /*
             * If there is no match, ignore the PTE, it could simply
             * be for a different segment size encoding and the
             * architecture specifies we should not match. Linux will
             * potentially leave behind PTEs for the wrong base page
             * size when demoting segments.
             */
            if (*pshift == 0) {
                continue;
            }
            /*
             * We don't do anything with pshift yet as qemu TLB only
             * deals with 4K pages anyway
             */
            pte->pte0 = pte0;
            pte->pte1 = pte1;
            ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
            return ptex + i;
        }
    }
    ppc_hash64_unmap_hptes(cpu, pteg, ptex, HPTES_PER_GROUP);
    /*
     * We didn't find a valid entry.
     */
    return -1;
}
572
/*
 * Full hash table lookup for @eaddr under SLB entry @slb: compute the
 * hash from the VSID and page offset, then search the primary PTEG
 * and, if that fails, the secondary PTEG (with H set and ~hash).
 * Returns the matching PTE index, or -1; fills *pte and *pshift on
 * success via ppc_hash64_pteg_search().
 */
static hwaddr ppc_hash64_htab_lookup(PowerPCCPU *cpu,
                                     ppc_slb_t *slb, target_ulong eaddr,
                                     ppc_hash_pte64_t *pte, unsigned *pshift)
{
    CPUPPCState *env = &cpu->env;
    hwaddr hash, ptex;
    uint64_t vsid, epnmask, epn, ptem;
    const PPCHash64SegmentPageSizes *sps = slb->sps;

    /*
     * The SLB store path should prevent any bad page size encodings
     * getting in there, so:
     */
    assert(sps);

    /* If ISL is set in LPCR we need to clamp the page size to 4K */
    if (env->spr[SPR_LPCR] & LPCR_ISL) {
        /* We assume that when using TCG, 4k is first entry of SPS */
        sps = &cpu->hash64_opts->sps[0];
        assert(sps->page_shift == 12);
    }

    /* Mask selecting the page number bits within the segment offset */
    epnmask = ~((1ULL << sps->page_shift) - 1);

    if (slb->vsid & SLB_VSID_B) {
        /* 1TB segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT_1T;
        epn = (eaddr & ~SEGMENT_MASK_1T) & epnmask;
        hash = vsid ^ (vsid << 25) ^ (epn >> sps->page_shift);
    } else {
        /* 256M segment */
        vsid = (slb->vsid & SLB_VSID_VSID) >> SLB_VSID_SHIFT;
        epn = (eaddr & ~SEGMENT_MASK_256M) & epnmask;
        hash = vsid ^ (epn >> sps->page_shift);
    }
    /* Expected pte0 for comparison: AVPN from the VSID/EPN, plus V */
    ptem = (slb->vsid & SLB_VSID_PTEM) | ((epn >> 16) & HPTE64_V_AVPN);
    ptem |= HPTE64_V_VALID;

    /* Page address translation */
    qemu_log_mask(CPU_LOG_MMU,
                  "htab_base " TARGET_FMT_plx " htab_mask " TARGET_FMT_plx
                  " hash " TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu), hash);

    /* Primary PTEG lookup */
    qemu_log_mask(CPU_LOG_MMU,
                  "0 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                  " vsid=" TARGET_FMT_lx " ptem=" TARGET_FMT_lx
                  " hash=" TARGET_FMT_plx "\n",
                  ppc_hash64_hpt_base(cpu), ppc_hash64_hpt_mask(cpu),
                  vsid, ptem, hash);
    ptex = ppc_hash64_pteg_search(cpu, hash, sps, ptem, pte, pshift);

    if (ptex == -1) {
        /* Secondary PTEG lookup */
        ptem |= HPTE64_V_SECONDARY;
        qemu_log_mask(CPU_LOG_MMU,
                      "1 htab=" TARGET_FMT_plx "/" TARGET_FMT_plx
                      " vsid=" TARGET_FMT_lx " api=" TARGET_FMT_lx
                      " hash=" TARGET_FMT_plx "\n", ppc_hash64_hpt_base(cpu),
                      ppc_hash64_hpt_mask(cpu), vsid, ptem, ~hash);

        ptex = ppc_hash64_pteg_search(cpu, ~hash, sps, ptem, pte, pshift);
    }

    return ptex;
}
David Gibson04808842013-03-12 00:31:09 +0000640
/*
 * Decode a HPTE's page shift without knowing which SLB entry mapped
 * it: try every supported segment page-size set in turn.  Returns 0
 * when no encoding matches.
 */
unsigned ppc_hash64_hpte_page_shift_noslb(PowerPCCPU *cpu,
                                          uint64_t pte0, uint64_t pte1)
{
    int i;

    if (!(pte0 & HPTE64_V_LARGE)) {
        /* No L bit: plain 4kiB page regardless of segment */
        return 12;
    }

    /*
     * The encodings in env->sps need to be carefully chosen so that
     * this gives an unambiguous result.
     */
    for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
        const PPCHash64SegmentPageSizes *sps = &cpu->hash64_opts->sps[i];
        unsigned shift;

        if (!sps->page_shift) {
            break;      /* end of the (zero-terminated) table */
        }

        shift = hpte_page_shift(sps, pte0, pte1);
        if (shift) {
            return shift;
        }
    }

    return 0;
}
670
/*
 * Raise an instruction storage interrupt with @error_code.  Depending
 * on LPCR VPM0/VPM1 (and MSR IR/HV) the fault is routed to the
 * hypervisor (HISI) instead of the OS (ISI).
 */
static void ppc_hash64_set_isi(CPUState *cs, uint64_t error_code)
{
    CPUPPCState *env = &POWERPC_CPU(cs)->env;
    bool vpm;

    if (msr_ir) {
        vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
    } else {
        switch (env->mmu_model) {
        case POWERPC_MMU_3_00:
            /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
            vpm = true;
            break;
        default:
            vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
            break;
        }
    }
    if (vpm && !msr_hv) {
        cs->exception_index = POWERPC_EXCP_HISI;
    } else {
        cs->exception_index = POWERPC_EXCP_ISI;
    }
    env->error_code = error_code;
}
696
David Gibson8fe08fa2018-03-22 16:49:28 +1100697static void ppc_hash64_set_dsi(CPUState *cs, uint64_t dar, uint64_t dsisr)
Benjamin Herrenschmidt33595dc2016-06-21 23:48:50 +0200698{
David Gibson8fe08fa2018-03-22 16:49:28 +1100699 CPUPPCState *env = &POWERPC_CPU(cs)->env;
Benjamin Herrenschmidt33595dc2016-06-21 23:48:50 +0200700 bool vpm;
701
702 if (msr_dr) {
703 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM1);
704 } else {
Suraj Jitindar Singh50659082017-02-10 16:25:54 +1100705 switch (env->mmu_model) {
706 case POWERPC_MMU_3_00:
707 /* Field deprecated in ISAv3.00 - interrupts always go to hyperv */
708 vpm = true;
709 break;
710 default:
711 vpm = !!(env->spr[SPR_LPCR] & LPCR_VPM0);
712 break;
713 }
Benjamin Herrenschmidt33595dc2016-06-21 23:48:50 +0200714 }
715 if (vpm && !msr_hv) {
716 cs->exception_index = POWERPC_EXCP_HDSI;
717 env->spr[SPR_HDAR] = dar;
718 env->spr[SPR_HDSISR] = dsisr;
719 } else {
720 cs->exception_index = POWERPC_EXCP_DSI;
721 env->spr[SPR_DAR] = dar;
722 env->spr[SPR_DSISR] = dsisr;
723 }
724 env->error_code = 0;
725}
726
727
Benjamin Herrenschmidta2dd4e82019-04-11 10:00:01 +0200728static void ppc_hash64_set_r(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
729{
730 hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 16;
731
732 if (cpu->vhyp) {
733 PPCVirtualHypervisorClass *vhc =
734 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
735 vhc->hpte_set_r(cpu->vhyp, ptex, pte1);
736 return;
737 }
738 base = ppc_hash64_hpt_base(cpu);
739
740
741 /* The HW performs a non-atomic byte update */
742 stb_phys(CPU(cpu)->as, base + offset, ((pte1 >> 8) & 0xff) | 0x01);
743}
744
745static void ppc_hash64_set_c(PowerPCCPU *cpu, hwaddr ptex, uint64_t pte1)
746{
747 hwaddr base, offset = ptex * HASH_PTE_SIZE_64 + 15;
748
749 if (cpu->vhyp) {
750 PPCVirtualHypervisorClass *vhc =
751 PPC_VIRTUAL_HYPERVISOR_GET_CLASS(cpu->vhyp);
752 vhc->hpte_set_c(cpu->vhyp, ptex, pte1);
753 return;
754 }
755 base = ppc_hash64_hpt_base(cpu);
756
757 /* The HW performs a non-atomic byte update */
758 stb_phys(CPU(cpu)->as, base + offset, (pte1 & 0xff) | 0x80);
759}
760
/*
 * Handle a 64-bit hash-MMU translation fault.
 *
 * @cpu:     CPU taking the fault
 * @eaddr:   effective (virtual) address of the access
 * @rwx:     access type: 0 = data load, 1 = data store, 2 = ifetch
 * @mmu_idx: MMU index under which to install the resulting TLB entry
 *
 * Returns 0 if a translation was found and entered into the QEMU TLB
 * via tlb_set_page().  Returns 1 if the access faults; in that case
 * cs->exception_index, env->error_code and the relevant SPRs have
 * already been set up for the interrupt to be delivered.
 */
int ppc_hash64_handle_mmu_fault(PowerPCCPU *cpu, vaddr eaddr,
                                int rwx, int mmu_idx)
{
    CPUState *cs = CPU(cpu);
    CPUPPCState *env = &cpu->env;
    ppc_slb_t *slb;
    unsigned apshift;
    hwaddr ptex;
    ppc_hash_pte64_t pte;
    int exec_prot, pp_prot, amr_prot, prot;
    /* Page protection required for the access, indexed by rwx */
    const int need_prot[] = {PAGE_READ, PAGE_WRITE, PAGE_EXEC};
    hwaddr raddr;

    assert((rwx == 0) || (rwx == 1) || (rwx == 2));

    /*
     * Note on LPCR usage: 970 uses HID4, but our special variant of
     * store_spr copies relevant fields into env->spr[SPR_LPCR].
     * Similarly we filter unimplemented bits when storing into LPCR
     * depending on the MMU version. This code can thus just use the
     * LPCR "as-is".
     */

    /* 1. Handle real mode accesses */
    if (((rwx == 2) && (msr_ir == 0)) || ((rwx != 2) && (msr_dr == 0))) {
        /*
         * Translation is supposedly "off", but in real mode the top 4
         * effective address bits are (mostly) ignored
         */
        raddr = eaddr & 0x0FFFFFFFFFFFFFFFULL;

        /* In HV mode, add HRMOR if top EA bit is clear */
        if (msr_hv || !env->has_hv_mode) {
            if (!(eaddr >> 63)) {
                raddr |= env->spr[SPR_HRMOR];
            }
        } else {
            /* Otherwise, check VPM for RMA vs VRMA */
            if (env->spr[SPR_LPCR] & LPCR_VPM0) {
                /* VRMA: translate via the synthetic SLB entry, if valid */
                slb = &env->vrma_slb;
                if (slb->sps) {
                    goto skip_slb_search;
                }
                /* Not much else to do here */
                cs->exception_index = POWERPC_EXCP_MCHECK;
                env->error_code = 0;
                return 1;
            } else if (raddr < env->rmls) {
                /* RMA. Check bounds in RMLS */
                raddr |= env->spr[SPR_RMOR];
            } else {
                /* The access failed, generate the appropriate interrupt */
                if (rwx == 2) {
                    ppc_hash64_set_isi(cs, SRR1_PROTFAULT);
                } else {
                    int dsisr = DSISR_PROTFAULT;
                    if (rwx == 1) {
                        dsisr |= DSISR_ISSTORE;
                    }
                    ppc_hash64_set_dsi(cs, eaddr, dsisr);
                }
                return 1;
            }
        }
        /* Real mode accesses get full RWX permission */
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     PAGE_READ | PAGE_WRITE | PAGE_EXEC, mmu_idx,
                     TARGET_PAGE_SIZE);
        return 0;
    }

    /* 2. Translation is on, so look up the SLB */
    slb = slb_lookup(cpu, eaddr);
    if (!slb) {
        /* No entry found, check if in-memory segment tables are in use */
        if (ppc64_use_proc_tbl(cpu)) {
            /* TODO - Unsupported */
            error_report("Segment Table Support Unimplemented");
            exit(1);
        }
        /* Segment still not found, generate the appropriate interrupt */
        if (rwx == 2) {
            cs->exception_index = POWERPC_EXCP_ISEG;
            env->error_code = 0;
        } else {
            cs->exception_index = POWERPC_EXCP_DSEG;
            env->error_code = 0;
            env->spr[SPR_DAR] = eaddr;
        }
        return 1;
    }

/* Entered directly from the real-mode VRMA path above */
skip_slb_search:

    /* 3. Check for segment level no-execute violation */
    if ((rwx == 2) && (slb->vsid & SLB_VSID_N)) {
        ppc_hash64_set_isi(cs, SRR1_NOEXEC_GUARD);
        return 1;
    }

    /* 4. Locate the PTE in the hash table */
    ptex = ppc_hash64_htab_lookup(cpu, slb, eaddr, &pte, &apshift);
    if (ptex == -1) {
        /* No PTE found: page fault (ISI/DSI with the "no PTE" status) */
        if (rwx == 2) {
            ppc_hash64_set_isi(cs, SRR1_NOPTE);
        } else {
            int dsisr = DSISR_NOPTE;
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }
    qemu_log_mask(CPU_LOG_MMU,
                  "found PTE at index %08" HWADDR_PRIx "\n", ptex);

    /* 5. Check access permissions */

    /* Effective protection is the intersection of all three sources */
    exec_prot = ppc_hash64_pte_noexec_guard(cpu, pte);
    pp_prot = ppc_hash64_pte_prot(cpu, slb, pte);
    amr_prot = ppc_hash64_amr_prot(cpu, pte);
    prot = exec_prot & pp_prot & amr_prot;

    if ((need_prot[rwx] & ~prot) != 0) {
        /* Access right violation */
        qemu_log_mask(CPU_LOG_MMU, "PTE access rejected\n");
        if (rwx == 2) {
            /* Build SRR1 status identifying which check failed */
            int srr1 = 0;
            if (PAGE_EXEC & ~exec_prot) {
                srr1 |= SRR1_NOEXEC_GUARD; /* Access violates noexec or guard */
            } else if (PAGE_EXEC & ~pp_prot) {
                srr1 |= SRR1_PROTFAULT; /* Access violates access authority */
            }
            if (PAGE_EXEC & ~amr_prot) {
                srr1 |= SRR1_IAMR; /* Access violates virt pg class key prot */
            }
            ppc_hash64_set_isi(cs, srr1);
        } else {
            /* Build DSISR status identifying which check failed */
            int dsisr = 0;
            if (need_prot[rwx] & ~pp_prot) {
                dsisr |= DSISR_PROTFAULT;
            }
            if (rwx == 1) {
                dsisr |= DSISR_ISSTORE;
            }
            if (need_prot[rwx] & ~amr_prot) {
                dsisr |= DSISR_AMR;
            }
            ppc_hash64_set_dsi(cs, eaddr, dsisr);
        }
        return 1;
    }

    qemu_log_mask(CPU_LOG_MMU, "PTE access granted !\n");

    /* 6. Update PTE referenced and changed bits if necessary */

    if (!(pte.pte1 & HPTE64_R_R)) {
        ppc_hash64_set_r(cpu, ptex, pte.pte1);
    }
    if (!(pte.pte1 & HPTE64_R_C)) {
        if (rwx == 1) {
            ppc_hash64_set_c(cpu, ptex, pte.pte1);
        } else {
            /*
             * Treat the page as read-only for now, so that a later write
             * will pass through this function again to set the C bit
             */
            prot &= ~PAGE_WRITE;
        }
    }

    /* 7. Determine the real address from the PTE */

    raddr = deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, eaddr);

    tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, 1ULL << apshift);

    return 0;
}
David Gibson629bd512013-03-12 00:31:11 +0000942
David Gibson7ef23062016-01-14 15:33:27 +1100943hwaddr ppc_hash64_get_phys_page_debug(PowerPCCPU *cpu, target_ulong addr)
David Gibsonf2ad6be2013-03-12 00:31:13 +0000944{
David Gibson7ef23062016-01-14 15:33:27 +1100945 CPUPPCState *env = &cpu->env;
David Gibson5883d8b2013-03-12 00:31:45 +0000946 ppc_slb_t *slb;
David Gibson7222b942017-02-27 16:03:41 +1100947 hwaddr ptex, raddr;
David Gibson5883d8b2013-03-12 00:31:45 +0000948 ppc_hash_pte64_t pte;
David Gibsonbe18b2b2016-01-27 11:39:15 +1100949 unsigned apshift;
David Gibsonf2ad6be2013-03-12 00:31:13 +0000950
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +1000951 /* Handle real mode */
David Gibson5883d8b2013-03-12 00:31:45 +0000952 if (msr_dr == 0) {
953 /* In real mode the top 4 effective address bits are ignored */
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +1000954 raddr = addr & 0x0FFFFFFFFFFFFFFFULL;
David Gibson5883d8b2013-03-12 00:31:45 +0000955
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +1000956 /* In HV mode, add HRMOR if top EA bit is clear */
957 if ((msr_hv || !env->has_hv_mode) && !(addr >> 63)) {
958 return raddr | env->spr[SPR_HRMOR];
959 }
960
961 /* Otherwise, check VPM for RMA vs VRMA */
962 if (env->spr[SPR_LPCR] & LPCR_VPM0) {
963 slb = &env->vrma_slb;
964 if (!slb->sps) {
965 return -1;
966 }
967 } else if (raddr < env->rmls) {
968 /* RMA. Check bounds in RMLS */
969 return raddr | env->spr[SPR_RMOR];
970 } else {
971 return -1;
972 }
973 } else {
974 slb = slb_lookup(cpu, addr);
975 if (!slb) {
976 return -1;
977 }
David Gibsonf2ad6be2013-03-12 00:31:13 +0000978 }
979
David Gibson7222b942017-02-27 16:03:41 +1100980 ptex = ppc_hash64_htab_lookup(cpu, slb, addr, &pte, &apshift);
981 if (ptex == -1) {
David Gibson5883d8b2013-03-12 00:31:45 +0000982 return -1;
983 }
984
David Gibsonbe18b2b2016-01-27 11:39:15 +1100985 return deposit64(pte.pte1 & HPTE64_R_RPN, 0, apshift, addr)
David Gibsoncd6a9bb2016-01-27 11:52:57 +1100986 & TARGET_PAGE_MASK;
David Gibsonf2ad6be2013-03-12 00:31:13 +0000987}
Aneesh Kumar K.Vc1385932014-02-20 18:52:38 +0100988
David Gibson7222b942017-02-27 16:03:41 +1100989void ppc_hash64_tlb_flush_hpte(PowerPCCPU *cpu, target_ulong ptex,
David Gibson61a36c92016-01-15 16:12:09 +1100990 target_ulong pte0, target_ulong pte1)
991{
992 /*
993 * XXX: given the fact that there are too many segments to
994 * invalidate, and we still don't have a tlb_flush_mask(env, n,
995 * mask) in QEMU, we just invalidate all TLBs
996 */
Nikunj A Dadhaniad76ab5e2016-09-20 22:05:01 +0530997 cpu->env.tlb_need_flush = TLB_NEED_GLOBAL_FLUSH | TLB_NEED_LOCAL_FLUSH;
David Gibson61a36c92016-01-15 16:12:09 +1100998}
Benjamin Herrenschmidt4b3fc372016-06-27 08:55:16 +0200999
David Gibson5ad55312018-04-05 16:43:59 +10001000static void ppc_hash64_update_rmls(PowerPCCPU *cpu)
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +10001001{
David Gibson8fe08fa2018-03-22 16:49:28 +11001002 CPUPPCState *env = &cpu->env;
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +10001003 uint64_t lpcr = env->spr[SPR_LPCR];
1004
1005 /*
1006 * This is the full 4 bits encoding of POWER8. Previous
1007 * CPUs only support a subset of these but the filtering
1008 * is done when writing LPCR
1009 */
1010 switch ((lpcr & LPCR_RMLS) >> LPCR_RMLS_SHIFT) {
1011 case 0x8: /* 32MB */
1012 env->rmls = 0x2000000ull;
1013 break;
1014 case 0x3: /* 64MB */
1015 env->rmls = 0x4000000ull;
1016 break;
1017 case 0x7: /* 128MB */
1018 env->rmls = 0x8000000ull;
1019 break;
1020 case 0x4: /* 256MB */
1021 env->rmls = 0x10000000ull;
1022 break;
1023 case 0x2: /* 1GB */
1024 env->rmls = 0x40000000ull;
1025 break;
1026 case 0x1: /* 16GB */
1027 env->rmls = 0x400000000ull;
1028 break;
1029 default:
1030 /* What to do here ??? */
1031 env->rmls = 0;
1032 }
1033}
1034
David Gibson5ad55312018-04-05 16:43:59 +10001035static void ppc_hash64_update_vrma(PowerPCCPU *cpu)
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +10001036{
David Gibson8fe08fa2018-03-22 16:49:28 +11001037 CPUPPCState *env = &cpu->env;
David Gibsonb07c59f2018-03-23 13:31:52 +11001038 const PPCHash64SegmentPageSizes *sps = NULL;
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +10001039 target_ulong esid, vsid, lpcr;
1040 ppc_slb_t *slb = &env->vrma_slb;
1041 uint32_t vrmasd;
1042 int i;
1043
1044 /* First clear it */
1045 slb->esid = slb->vsid = 0;
1046 slb->sps = NULL;
1047
1048 /* Is VRMA enabled ? */
1049 lpcr = env->spr[SPR_LPCR];
1050 if (!(lpcr & LPCR_VPM0)) {
1051 return;
1052 }
1053
David Gibsond75cbae2019-03-21 22:32:53 +11001054 /*
1055 * Make one up. Mostly ignore the ESID which will not be needed
1056 * for translation
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +10001057 */
1058 vsid = SLB_VSID_VRMA;
1059 vrmasd = (lpcr & LPCR_VRMASD) >> LPCR_VRMASD_SHIFT;
1060 vsid |= (vrmasd << 4) & (SLB_VSID_L | SLB_VSID_LP);
1061 esid = SLB_ESID_V;
1062
David Gibson8fe08fa2018-03-22 16:49:28 +11001063 for (i = 0; i < PPC_PAGE_SIZES_MAX_SZ; i++) {
David Gibsonb07c59f2018-03-23 13:31:52 +11001064 const PPCHash64SegmentPageSizes *sps1 = &cpu->hash64_opts->sps[i];
Benjamin Herrenschmidt912acdf2016-07-05 07:37:08 +10001065
1066 if (!sps1->page_shift) {
1067 break;
1068 }
1069
1070 if ((vsid & SLB_VSID_LLP_MASK) == sps1->slb_enc) {
1071 sps = sps1;
1072 break;
1073 }
1074 }
1075
1076 if (!sps) {
1077 error_report("Bad page size encoding esid 0x"TARGET_FMT_lx
1078 " vsid 0x"TARGET_FMT_lx, esid, vsid);
1079 return;
1080 }
1081
1082 slb->vsid = vsid;
1083 slb->esid = esid;
1084 slb->sps = sps;
1085}
1086
/*
 * Store @val into the Logical Partitioning Control Register, keeping
 * only the bits implemented by the current MMU model, then recompute
 * the derived real-mode state (env->rmls and the VRMA SLB entry).
 * For unrecognised MMU models every bit is filtered out (LPCR = 0).
 */
void ppc_store_lpcr(PowerPCCPU *cpu, target_ulong val)
{
    CPUPPCState *env = &cpu->env;
    uint64_t lpcr = 0;

    /* Filter out bits */
    switch (env->mmu_model) {
    case POWERPC_MMU_64B: /* 970 */
        /*
         * The 970 has no architected LPCR; individual HID4 bits are
         * translated into their LPCR equivalents here.
         */
        if (val & 0x40) {
            lpcr |= LPCR_LPES0;
        }
        if (val & 0x8000000000000000ull) {
            lpcr |= LPCR_LPES1;
        }
        if (val & 0x20) {
            lpcr |= (0x4ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x4000000000000000ull) {
            lpcr |= (0x2ull << LPCR_RMLS_SHIFT);
        }
        if (val & 0x2000000000000000ull) {
            lpcr |= (0x1ull << LPCR_RMLS_SHIFT);
        }
        env->spr[SPR_RMOR] = ((lpcr >> 41) & 0xffffull) << 26;

        /*
         * XXX We could also write LPID from HID4 here
         * but since we don't tag any translation on it
         * it doesn't actually matter
         *
         * XXX For proper emulation of 970 we also need
         * to dig HRMOR out of HID5
         */
        break;
    case POWERPC_MMU_2_03: /* P5p */
        lpcr = val & (LPCR_RMLS | LPCR_ILE |
                      LPCR_LPES0 | LPCR_LPES1 |
                      LPCR_RMI | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_06: /* P7 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_DPFD |
                      LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_P7_PECE0 | LPCR_P7_PECE1 | LPCR_P7_PECE2 |
                      LPCR_MER | LPCR_TC |
                      LPCR_LPES0 | LPCR_LPES1 | LPCR_HDICE);
        break;
    case POWERPC_MMU_2_07: /* P8 */
        lpcr = val & (LPCR_VPM0 | LPCR_VPM1 | LPCR_ISL | LPCR_KBV |
                      LPCR_DPFD | LPCR_VRMASD | LPCR_RMLS | LPCR_ILE |
                      LPCR_AIL | LPCR_ONL | LPCR_P8_PECE0 | LPCR_P8_PECE1 |
                      LPCR_P8_PECE2 | LPCR_P8_PECE3 | LPCR_P8_PECE4 |
                      LPCR_MER | LPCR_TC | LPCR_LPES0 | LPCR_HDICE);
        break;
    case POWERPC_MMU_3_00: /* P9 */
        lpcr = val & (LPCR_VPM1 | LPCR_ISL | LPCR_KBV | LPCR_DPFD |
                      (LPCR_PECE_U_MASK & LPCR_HVEE) | LPCR_ILE | LPCR_AIL |
                      LPCR_UPRT | LPCR_EVIRT | LPCR_ONL | LPCR_HR | LPCR_LD |
                      (LPCR_PECE_L_MASK & (LPCR_PDEE | LPCR_HDEE | LPCR_EEE |
                      LPCR_DEE | LPCR_OEE)) | LPCR_MER | LPCR_GTSE | LPCR_TC |
                      LPCR_HEIC | LPCR_LPES0 | LPCR_HVICE | LPCR_HDICE);
        /*
         * If we have a virtual hypervisor, we need to bring back RMLS. It
         * doesn't exist on an actual P9 but that's all we know how to
         * configure with softmmu at the moment
         */
        if (cpu->vhyp) {
            lpcr |= (val & LPCR_RMLS);
        }
        break;
    default:
        /* Unknown MMU model: keep every bit filtered out */
        ;
    }
    env->spr[SPR_LPCR] = lpcr;
    ppc_hash64_update_rmls(cpu);
    ppc_hash64_update_vrma(cpu);
}
David Gibsona0594712018-03-23 13:07:48 +11001163
David Gibson5ad55312018-04-05 16:43:59 +10001164void helper_store_lpcr(CPUPPCState *env, target_ulong val)
1165{
Richard Hendersondb70b312019-03-22 19:07:57 -07001166 PowerPCCPU *cpu = env_archcpu(env);
David Gibson5ad55312018-04-05 16:43:59 +10001167
1168 ppc_store_lpcr(cpu, val);
1169}
1170
David Gibsona0594712018-03-23 13:07:48 +11001171void ppc_hash64_init(PowerPCCPU *cpu)
1172{
1173 CPUPPCState *env = &cpu->env;
1174 PowerPCCPUClass *pcc = POWERPC_CPU_GET_CLASS(cpu);
1175
David Gibson21e405f2018-03-23 13:59:20 +11001176 if (!pcc->hash64_opts) {
1177 assert(!(env->mmu_model & POWERPC_MMU_64));
1178 return;
David Gibsona0594712018-03-23 13:07:48 +11001179 }
David Gibson21e405f2018-03-23 13:59:20 +11001180
1181 cpu->hash64_opts = g_memdup(pcc->hash64_opts, sizeof(*cpu->hash64_opts));
David Gibsona0594712018-03-23 13:07:48 +11001182}
1183
/* Release the per-CPU copy of the page-size options made in ppc_hash64_init() */
void ppc_hash64_finalize(PowerPCCPU *cpu)
{
    g_free(cpu->hash64_opts);
}
David Gibsonb07c59f2018-03-23 13:31:52 +11001188
/*
 * Minimal hash-MMU configuration: no optional feature flags, a
 * 64-entry SLB, and only 4kiB base pages plus 16MiB large pages.
 */
const PPCHash64Options ppc_hash64_opts_basic = {
    .flags = 0,
    .slb_size = 64,
    .sps = {
        { .page_shift = 12, /* 4K */
          .slb_enc = 0,
          .enc = { { .page_shift = 12, .pte_enc = 0 } }
        },
        { .page_shift = 24, /* 16M */
          .slb_enc = 0x100,
          .enc = { { .page_shift = 24, .pte_enc = 0 } }
        },
    },
};
1203
/*
 * POWER7-style hash-MMU configuration: 1TiB segments, AMR-based
 * protection and cache-inhibited large pages, with a 32-entry SLB.
 * Each row pairs a segment base page size (and its SLB encoding)
 * with the actual page size / PTE encodings usable in that segment.
 */
const PPCHash64Options ppc_hash64_opts_POWER7 = {
    .flags = PPC_HASH64_1TSEG | PPC_HASH64_AMR | PPC_HASH64_CI_LARGEPAGE,
    .slb_size = 32,
    .sps = {
        {
            .page_shift = 12, /* 4K */
            .slb_enc = 0,
            .enc = { { .page_shift = 12, .pte_enc = 0 },
                     { .page_shift = 16, .pte_enc = 0x7 },
                     { .page_shift = 24, .pte_enc = 0x38 }, },
        },
        {
            .page_shift = 16, /* 64K */
            .slb_enc = SLB_VSID_64K,
            .enc = { { .page_shift = 16, .pte_enc = 0x1 },
                     { .page_shift = 24, .pte_enc = 0x8 }, },
        },
        {
            .page_shift = 24, /* 16M */
            .slb_enc = SLB_VSID_16M,
            .enc = { { .page_shift = 24, .pte_enc = 0 }, },
        },
        {
            .page_shift = 34, /* 16G */
            .slb_enc = SLB_VSID_16G,
            .enc = { { .page_shift = 34, .pte_enc = 0x3 }, },
        },
    }
};
David Gibson27f00f02018-03-26 15:01:22 +11001233
1234void ppc_hash64_filter_pagesizes(PowerPCCPU *cpu,
1235 bool (*cb)(void *, uint32_t, uint32_t),
1236 void *opaque)
1237{
1238 PPCHash64Options *opts = cpu->hash64_opts;
1239 int i;
1240 int n = 0;
1241 bool ci_largepage = false;
1242
1243 assert(opts);
1244
1245 n = 0;
1246 for (i = 0; i < ARRAY_SIZE(opts->sps); i++) {
1247 PPCHash64SegmentPageSizes *sps = &opts->sps[i];
1248 int j;
1249 int m = 0;
1250
1251 assert(n <= i);
1252
1253 if (!sps->page_shift) {
1254 break;
1255 }
1256
1257 for (j = 0; j < ARRAY_SIZE(sps->enc); j++) {
1258 PPCHash64PageSize *ps = &sps->enc[j];
1259
1260 assert(m <= j);
1261 if (!ps->page_shift) {
1262 break;
1263 }
1264
1265 if (cb(opaque, sps->page_shift, ps->page_shift)) {
1266 if (ps->page_shift >= 16) {
1267 ci_largepage = true;
1268 }
1269 sps->enc[m++] = *ps;
1270 }
1271 }
1272
1273 /* Clear rest of the row */
1274 for (j = m; j < ARRAY_SIZE(sps->enc); j++) {
1275 memset(&sps->enc[j], 0, sizeof(sps->enc[j]));
1276 }
1277
1278 if (m) {
1279 n++;
1280 }
1281 }
1282
1283 /* Clear the rest of the table */
1284 for (i = n; i < ARRAY_SIZE(opts->sps); i++) {
1285 memset(&opts->sps[i], 0, sizeof(opts->sps[i]));
1286 }
1287
1288 if (!ci_largepage) {
1289 opts->flags &= ~PPC_HASH64_CI_LARGEPAGE;
1290 }
1291}