/*
 * PowerPC MMU, TLB, SLB and BAT emulation helpers for QEMU.
 *
 * Copyright (c) 2003-2007 Jocelyn Mayer
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/units.h"
#include "cpu.h"
#include "sysemu/kvm.h"
#include "kvm_ppc.h"
#include "mmu-hash64.h"
#include "mmu-hash32.h"
#include "exec/exec-all.h"
#include "exec/page-protection.h"
#include "exec/log.h"
#include "helper_regs.h"
#include "qemu/error-report.h"
#include "qemu/qemu-print.h"
#include "internal.h"
#include "mmu-book3s-v3.h"
#include "mmu-radix64.h"
#include "mmu-booke.h"
#include "exec/helper-proto.h"
#include "exec/cpu_ldst.h"

/* #define FLUSH_ALL_TLBS */

/*****************************************************************************/
/* PowerPC MMU emulation */

/* Software driven TLB helpers */
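/*
 * Drop every entry of the 6xx software-managed TLB.  The tlb6[] array holds
 * the ITLB and DTLB halves back to back, hence the 2 * nb_tlb bound, and the
 * QEMU TLB is flushed as well so stale translations disappear.
 */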
static inline void ppc6xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppc6xx_tlb_t *tlb;
    int nr, max = 2 * env->nb_tlb;

    for (nr = 0; nr < max; nr++) {
        tlb = &env->tlb.tlb6[nr];
        pte_invalidate(&tlb->pte0);
    }
    tlb_flush(env_cpu(env));
}

static inline void ppc6xx_tlb_invalidate_virt2(CPUPPCState *env,
                                               target_ulong eaddr,
                                               int is_code, int match_epn)
{
#if !defined(FLUSH_ALL_TLBS)
    CPUState *cs = env_cpu(env);
    ppc6xx_tlb_t *tlb;
    int way, nr;

    /* Invalidate ITLB + DTLB, all ways */
    for (way = 0; way < env->nb_ways; way++) {
        nr = ppc6xx_tlb_getnum(env, eaddr, way, is_code);
        tlb = &env->tlb.tlb6[nr];
        if (pte_is_valid(tlb->pte0) && (match_epn == 0 || eaddr == tlb->EPN)) {
            qemu_log_mask(CPU_LOG_MMU, "TLB invalidate %d/%d "
                          TARGET_FMT_lx "\n", nr, env->nb_tlb, eaddr);
            pte_invalidate(&tlb->pte0);
            tlb_flush_page(cs, tlb->EPN);
        }
    }
#else
    /* XXX: PowerPC specification says this is valid as well */
    ppc6xx_tlb_invalidate_all(env);
#endif
}

static inline void ppc6xx_tlb_invalidate_virt(CPUPPCState *env,
                                              target_ulong eaddr, int is_code)
{
    ppc6xx_tlb_invalidate_virt2(env, eaddr, is_code, 0);
}

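/*
 * Fill one entry of the 6xx software TLB with the PTE pair provided by the
 * TLB miss handler, evicting any previous mapping of the same EPN from the
 * QEMU TLB, and remember the way that was written for the LRU hint.
 */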
static void ppc6xx_tlb_store(CPUPPCState *env, target_ulong EPN, int way,
                             int is_code, target_ulong pte0, target_ulong pte1)
{
    ppc6xx_tlb_t *tlb;
    int nr;

    nr = ppc6xx_tlb_getnum(env, EPN, way, is_code);
    tlb = &env->tlb.tlb6[nr];
    qemu_log_mask(CPU_LOG_MMU, "Set TLB %d/%d EPN " TARGET_FMT_lx " PTE0 "
                  TARGET_FMT_lx " PTE1 " TARGET_FMT_lx "\n", nr, env->nb_tlb,
                  EPN, pte0, pte1);
    /* Invalidate any pending reference in QEMU for this virtual address */
    ppc6xx_tlb_invalidate_virt2(env, EPN, is_code, 1);
    tlb->pte0 = pte0;
    tlb->pte1 = pte1;
    tlb->EPN = EPN;
    /* Store last way for LRU mechanism */
    env->last_way = way;
}

/* Helpers specific to PowerPC 40x implementations */
static inline void ppc4xx_tlb_invalidate_all(CPUPPCState *env)
{
    ppcemb_tlb_t *tlb;
    int i;

    for (i = 0; i < env->nb_tlb; i++) {
        tlb = &env->tlb.tlbe[i];
        tlb->prot &= ~PAGE_VALID;
    }
    tlb_flush(env_cpu(env));
}

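/*
 * Invalidate the BookE 2.06 TLB arrays selected by the bits set in @flags
 * (one bit per TLB array); when @check_iprot is set, entries marked
 * MAS1[IPROT] are preserved.  The QEMU TLB is then flushed wholesale.
 */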
static void booke206_flush_tlb(CPUPPCState *env, int flags,
                               const int check_iprot)
{
    int tlb_size;
    int i, j;
    ppcmas_tlb_t *tlb = env->tlb.tlbm;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        if (flags & (1 << i)) {
            tlb_size = booke206_tlb_size(env, i);
            for (j = 0; j < tlb_size; j++) {
                if (!check_iprot || !(tlb[j].mas1 & MAS1_IPROT)) {
                    tlb[j].mas1 &= ~MAS1_VALID;
                }
            }
        }
        tlb += booke206_tlb_size(env, i);
    }

    tlb_flush(env_cpu(env));
}

/*****************************************************************************/
/* BATs management */
#if !defined(FLUSH_ALL_TLBS)
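/*
 * Flush the QEMU TLB pages that were covered by a BAT mapping whose upper
 * register is being rewritten.  If the BAT covers more than 1024 pages, a
 * full flush is cheaper than iterating over the range.
 */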
static inline void do_invalidate_BAT(CPUPPCState *env, target_ulong BATu,
                                     target_ulong mask)
{
    CPUState *cs = env_cpu(env);
    target_ulong base, end, page;

    base = BATu & ~0x0001FFFF;
    end = base + mask + 0x00020000;
    if (((end - base) >> TARGET_PAGE_BITS) > 1024) {
        /* Flushing 1024 4K pages is slower than a complete flush */
        qemu_log_mask(CPU_LOG_MMU, "Flush all BATs\n");
        tlb_flush(cs);
        qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
        return;
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush BAT from " TARGET_FMT_lx
                  " to " TARGET_FMT_lx " (" TARGET_FMT_lx ")\n",
                  base, end, mask);
    for (page = base; page != end; page += TARGET_PAGE_SIZE) {
        tlb_flush_page(cs, page);
    }
    qemu_log_mask(CPU_LOG_MMU, "Flush done\n");
}
#endif

static inline void dump_store_bat(CPUPPCState *env, char ID, int ul, int nr,
                                  target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU, "Set %cBAT%d%c to " TARGET_FMT_lx " ("
                  TARGET_FMT_lx ")\n", ID, nr, ul == 0 ? 'u' : 'l',
                  value, env->nip);
}

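/*
 * SPR store helpers for the IBAT/DBAT pairs.  Writing an upper BAT masks
 * BEPI/BRPN according to the block length encoded in the value and
 * invalidates any translations the old BAT may have provided; writing a
 * lower BAT only updates the register.
 */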
void helper_store_ibatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'I', 0, nr, value);
    if (env->IBAT[0][nr] != value) {
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#endif
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
        env->IBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->IBAT[1][nr] = (env->IBAT[1][nr] & 0x0000007B) |
            (env->IBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->IBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_ibatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'I', 1, nr, value);
    env->IBAT[1][nr] = value;
}

void helper_store_dbatu(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    target_ulong mask;

    dump_store_bat(env, 'D', 0, nr, value);
    if (env->DBAT[0][nr] != value) {
        /*
         * When storing valid upper BAT, mask BEPI and BRPN and
         * invalidate all TLBs covered by this BAT
         */
        mask = (value << 15) & 0x0FFE0000UL;
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#endif
        mask = (value << 15) & 0x0FFE0000UL;
        env->DBAT[0][nr] = (value & 0x00001FFFUL) |
            (value & ~0x0001FFFFUL & ~mask);
        env->DBAT[1][nr] = (env->DBAT[1][nr] & 0x0000007B) |
            (env->DBAT[1][nr] & ~0x0001FFFF & ~mask);
#if !defined(FLUSH_ALL_TLBS)
        do_invalidate_BAT(env, env->DBAT[0][nr], mask);
#else
        tlb_flush(env_cpu(env));
#endif
    }
}

void helper_store_dbatl(CPUPPCState *env, uint32_t nr, target_ulong value)
{
    dump_store_bat(env, 'D', 1, nr, value);
    env->DBAT[1][nr] = value;
}

/*****************************************************************************/
/* TLB management */
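/*
 * Invalidate the emulated TLB state for the current MMU model: 64-bit hash
 * MMUs and the 32B model only clear the deferred-flush flag and flush the
 * QEMU TLB, while the software-managed models also clear their shadow TLB
 * arrays.
 */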
void ppc_tlb_invalidate_all(CPUPPCState *env)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_SOFT_4xx:
        ppc4xx_tlb_invalidate_all(env);
        break;
    case POWERPC_MMU_REAL:
        cpu_abort(env_cpu(env), "No TLB for PowerPC 4xx in real mode\n");
        break;
    case POWERPC_MMU_MPC8xx:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "MPC8xx MMU model is not implemented\n");
        break;
    case POWERPC_MMU_BOOKE:
        tlb_flush(env_cpu(env));
        break;
    case POWERPC_MMU_BOOKE206:
        booke206_flush_tlb(env, -1, 0);
        break;
    case POWERPC_MMU_32B:
        env->tlb_need_flush = 0;
        tlb_flush(env_cpu(env));
        break;
    default:
        /* XXX: TODO */
        cpu_abort(env_cpu(env), "Unknown MMU model %x\n", env->mmu_model);
        break;
    }
}

void ppc_tlb_invalidate_one(CPUPPCState *env, target_ulong addr)
{
#if !defined(FLUSH_ALL_TLBS)
    addr &= TARGET_PAGE_MASK;
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* tlbie invalidates TLBs for all segments */
        /*
         * XXX: given the fact that there are too many segments to invalidate,
         * and we still don't have a tlb_flush_mask(env, n, mask) in QEMU,
         * we just invalidate all TLBs
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    } else
#endif /* defined(TARGET_PPC64) */
    switch (env->mmu_model) {
    case POWERPC_MMU_SOFT_6xx:
        ppc6xx_tlb_invalidate_virt(env, addr, 0);
        ppc6xx_tlb_invalidate_virt(env, addr, 1);
        break;
    case POWERPC_MMU_32B:
        /*
         * Actual CPUs invalidate entire congruence classes based on
         * the geometry of their TLBs and some OSes take that into
         * account, we just mark the TLB to be flushed later (context
         * synchronizing event or sync instruction on 32-bit).
         */
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
        break;
    default:
        /* Should never reach here with other MMU models */
        assert(0);
    }
#else
    ppc_tlb_invalidate_all(env);
#endif
}

/*****************************************************************************/
/* Special registers manipulation */

/* Segment registers load and store */
target_ulong helper_load_sr(CPUPPCState *env, target_ulong sr_num)
{
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        /* XXX */
        return 0;
    }
#endif
    return env->sr[sr_num];
}

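/*
 * Store to a segment register.  On 64-bit hash MMUs the SR image is mapped
 * onto an SLB entry (ESID taken from the SR number, VSID and flags from the
 * stored value); on 32-bit MMUs a changed SR simply schedules a local TLB
 * flush at the next synchronizing event.
 */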
void helper_store_sr(CPUPPCState *env, target_ulong srnum, target_ulong value)
{
    qemu_log_mask(CPU_LOG_MMU,
                  "%s: reg=%d " TARGET_FMT_lx " " TARGET_FMT_lx "\n", __func__,
                  (int)srnum, value, env->sr[srnum]);
#if defined(TARGET_PPC64)
    if (mmu_is_64bit(env->mmu_model)) {
        PowerPCCPU *cpu = env_archcpu(env);
        uint64_t esid, vsid;

        /* ESID = srnum */
        esid = ((uint64_t)(srnum & 0xf) << 28) | SLB_ESID_V;

        /* VSID = VSID */
        vsid = (value & 0xfffffff) << 12;
        /* flags = flags */
        vsid |= ((value >> 27) & 0xf) << 8;

        ppc_store_slb(cpu, srnum, esid, vsid);
    } else
#endif
    if (env->sr[srnum] != value) {
        env->sr[srnum] = value;
        /*
         * Invalidating 256MB of virtual memory in 4kB pages takes way
         * longer than flushing the whole TLB.
         */
#if !defined(FLUSH_ALL_TLBS) && 0
        {
            target_ulong page, end;
            /* Invalidate 256 MB of virtual memory */
            page = (16 << 20) * srnum;
            end = page + (16 << 20);
            for (; page != end; page += TARGET_PAGE_SIZE) {
                tlb_flush_page(env_cpu(env), page);
            }
        }
#else
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
#endif
    }
}

/* TLB management */
void helper_tlbia(CPUPPCState *env)
{
    ppc_tlb_invalidate_all(env);
}

void helper_tlbie(CPUPPCState *env, target_ulong addr)
{
    ppc_tlb_invalidate_one(env, addr);
}

#if defined(TARGET_PPC64)

/* Invalidation Selector */
#define TLBIE_IS_VA         0
#define TLBIE_IS_PID        1
#define TLBIE_IS_LPID       2
#define TLBIE_IS_ALL        3

/* Radix Invalidation Control */
#define TLBIE_RIC_TLB       0
#define TLBIE_RIC_PWC       1
#define TLBIE_RIC_ALL       2
#define TLBIE_RIC_GRP       3

/* Radix Actual Page sizes */
#define TLBIE_R_AP_4K       0
#define TLBIE_R_AP_64K      5
#define TLBIE_R_AP_2M       1
#define TLBIE_R_AP_1G       2

/* RB field masks */
#define TLBIE_RB_EPN_MASK   PPC_BITMASK(0, 51)
#define TLBIE_RB_IS_MASK    PPC_BITMASK(52, 53)
#define TLBIE_RB_AP_MASK    PPC_BITMASK(56, 58)

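/*
 * ISA 3.00 tlbie[l]: decode RB/RS and the RIC/PRS/R flags, reject invalid
 * instruction forms, and either flush the single page named by RB (Radix,
 * IS=0) or fall back to flagging a full local/global TLB flush.
 */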
void helper_tlbie_isa300(CPUPPCState *env, target_ulong rb, target_ulong rs,
                         uint32_t flags)
{
    unsigned ric = (flags & TLBIE_F_RIC_MASK) >> TLBIE_F_RIC_SHIFT;
    /*
     * With the exception of the checks for invalid instruction forms,
     * PRS is currently ignored, because we don't know if a given TLB entry
     * is process or partition scoped.
     */
    bool prs = flags & TLBIE_F_PRS;
    bool r = flags & TLBIE_F_R;
    bool local = flags & TLBIE_F_LOCAL;
    bool effR;
    unsigned is = extract64(rb, PPC_BIT_NR(53), 2);
    unsigned ap;        /* actual page size */
    target_ulong addr, pgoffs_mask;

    qemu_log_mask(CPU_LOG_MMU,
        "%s: local=%d addr=" TARGET_FMT_lx " ric=%u prs=%d r=%d is=%u\n",
        __func__, local, rb & TARGET_PAGE_MASK, ric, prs, r, is);

    effR = FIELD_EX64(env->msr, MSR, HV) ? r : env->spr[SPR_LPCR] & LPCR_HR;

    /* Partial TLB invalidation is supported for Radix only for now. */
    if (!effR) {
        goto inval_all;
    }

    /* Check for invalid instruction forms (effR=1). */
    if (unlikely(ric == TLBIE_RIC_GRP ||
                 ((ric == TLBIE_RIC_PWC || ric == TLBIE_RIC_ALL) &&
                  is == TLBIE_IS_VA) ||
                 (!prs && is == TLBIE_IS_PID))) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: invalid instruction form: ric=%u prs=%d r=%d is=%u\n",
            __func__, ric, prs, r, is);
        goto invalid;
    }

    /* We don't cache Page Walks. */
    if (ric == TLBIE_RIC_PWC) {
        if (local) {
            unsigned set = extract64(rb, PPC_BIT_NR(51), 12);
            if (set != 0) {
                qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid set: %d\n",
                              __func__, set);
                goto invalid;
            }
        }
        return;
    }

    /*
     * Invalidation by LPID or PID is not supported, so fall back
     * to a full TLB flush in these cases.
     */
    if (is != TLBIE_IS_VA) {
        goto inval_all;
    }

    /*
     * The results of an attempt to invalidate a translation outside of
     * quadrant 0 for Radix Tree translation (effR=1, RIC=0, PRS=1, IS=0,
     * and EA 0:1 != 0b00) are boundedly undefined.
     */
    if (unlikely(ric == TLBIE_RIC_TLB && prs && is == TLBIE_IS_VA &&
                 (rb & R_EADDR_QUADRANT) != R_EADDR_QUADRANT0)) {
        qemu_log_mask(LOG_GUEST_ERROR,
            "%s: attempt to invalidate a translation outside of quadrant 0\n",
            __func__);
        goto inval_all;
    }

    assert(is == TLBIE_IS_VA);
    assert(ric == TLBIE_RIC_TLB || ric == TLBIE_RIC_ALL);

    ap = extract64(rb, PPC_BIT_NR(58), 3);
    switch (ap) {
    case TLBIE_R_AP_4K:
        pgoffs_mask = 0xfffull;
        break;

    case TLBIE_R_AP_64K:
        pgoffs_mask = 0xffffull;
        break;

    case TLBIE_R_AP_2M:
        pgoffs_mask = 0x1fffffull;
        break;

    case TLBIE_R_AP_1G:
        pgoffs_mask = 0x3fffffffull;
        break;

    default:
        /*
         * If the value specified in RS 0:31, RS 32:63, RB 54:55, RB 56:58,
         * RB 44:51, or RB 56:63, when it is needed to perform the specified
         * operation, is not supported by the implementation, the instruction
         * is treated as if the instruction form were invalid.
         */
        qemu_log_mask(LOG_GUEST_ERROR, "%s: invalid AP: %d\n", __func__, ap);
        goto invalid;
    }

    addr = rb & TLBIE_RB_EPN_MASK & ~pgoffs_mask;

    if (local) {
        tlb_flush_page(env_cpu(env), addr);
    } else {
        tlb_flush_page_all_cpus_synced(env_cpu(env), addr);
    }
    return;

inval_all:
    env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    if (!local) {
        env->tlb_need_flush |= TLB_NEED_GLOBAL_FLUSH;
    }
    return;

invalid:
    raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                           POWERPC_EXCP_INVAL |
                           POWERPC_EXCP_INVAL_INVAL, GETPC());
}

#endif

void helper_tlbiva(CPUPPCState *env, target_ulong addr)
{
    /* tlbiva instruction only exists on BookE */
    assert(env->mmu_model == POWERPC_MMU_BOOKE);
    /* XXX: TODO */
    cpu_abort(env_cpu(env), "BookE MMU model is not implemented\n");
}

/* Software driven TLBs management */
/* PowerPC 602/603 software TLB load instructions helpers */
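/*
 * Common part of the 602/603 tlbld/tlbli helpers: rebuild the TLB entry
 * from the IMISS/ICMP or DMISS/DCMP pair plus RPA, using the way selected
 * by SRR1, and store it into the software TLB.
 */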
static void do_6xx_tlb(CPUPPCState *env, target_ulong new_EPN, int is_code)
{
    target_ulong RPN, CMP, EPN;
    int way;

    RPN = env->spr[SPR_RPA];
    if (is_code) {
        CMP = env->spr[SPR_ICMP];
        EPN = env->spr[SPR_IMISS];
    } else {
        CMP = env->spr[SPR_DCMP];
        EPN = env->spr[SPR_DMISS];
    }
    way = (env->spr[SPR_SRR1] >> 17) & 1;
    (void)EPN; /* avoid a compiler warning */
    qemu_log_mask(CPU_LOG_MMU, "%s: EPN " TARGET_FMT_lx " " TARGET_FMT_lx
                  " PTE0 " TARGET_FMT_lx " PTE1 " TARGET_FMT_lx " way %d\n",
                  __func__, new_EPN, EPN, CMP, RPN, way);
    /* Store this TLB */
    ppc6xx_tlb_store(env, (uint32_t)(new_EPN & TARGET_PAGE_MASK),
                     way, is_code, CMP, RPN);
}

void helper_6xx_tlbd(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 0);
}

void helper_6xx_tlbi(CPUPPCState *env, target_ulong EPN)
{
    do_6xx_tlb(env, EPN, 1);
}

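/*
 * BookE TLB size field conversions: the size field encodes a page of
 * 1 KiB * 4^size, and the reverse mapping returns -1 for page sizes that
 * have no valid encoding.
 */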
static inline target_ulong booke_tlb_to_page_size(int size)
{
    return 1024 << (2 * size);
}

static inline int booke_page_size_to_tlb(target_ulong page_size)
{
    int size;

    switch (page_size) {
    case 0x00000400UL:
        size = 0x0;
        break;
    case 0x00001000UL:
        size = 0x1;
        break;
    case 0x00004000UL:
        size = 0x2;
        break;
    case 0x00010000UL:
        size = 0x3;
        break;
    case 0x00040000UL:
        size = 0x4;
        break;
    case 0x00100000UL:
        size = 0x5;
        break;
    case 0x00400000UL:
        size = 0x6;
        break;
    case 0x01000000UL:
        size = 0x7;
        break;
    case 0x04000000UL:
        size = 0x8;
        break;
    case 0x10000000UL:
        size = 0x9;
        break;
    case 0x40000000UL:
        size = 0xA;
        break;
#if defined(TARGET_PPC64)
    case 0x000100000000ULL:
        size = 0xB;
        break;
    case 0x000400000000ULL:
        size = 0xC;
        break;
    case 0x001000000000ULL:
        size = 0xD;
        break;
    case 0x004000000000ULL:
        size = 0xE;
        break;
    case 0x010000000000ULL:
        size = 0xF;
        break;
#endif
    default:
        size = -1;
        break;
    }

    return size;
}

/* Helpers for 4xx TLB management */
#define PPC4XX_TLB_ENTRY_MASK       0x0000003f  /* Mask for 64 TLB entries */

#define PPC4XX_TLBHI_V              0x00000040
#define PPC4XX_TLBHI_E              0x00000020
#define PPC4XX_TLBHI_SIZE_MIN       0
#define PPC4XX_TLBHI_SIZE_MAX       7
#define PPC4XX_TLBHI_SIZE_DEFAULT   1
#define PPC4XX_TLBHI_SIZE_SHIFT     7
#define PPC4XX_TLBHI_SIZE_MASK      0x00000007

#define PPC4XX_TLBLO_EX             0x00000200
#define PPC4XX_TLBLO_WR             0x00000100
#define PPC4XX_TLBLO_ATTR_MASK      0x000000FF
#define PPC4XX_TLBLO_RPN_MASK       0xFFFFFC00

void helper_store_40x_pid(CPUPPCState *env, target_ulong val)
{
    if (env->spr[SPR_40x_PID] != val) {
        env->spr[SPR_40x_PID] = val;
        env->tlb_need_flush |= TLB_NEED_LOCAL_FLUSH;
    }
}

target_ulong helper_4xx_tlbre_hi(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->EPN;
    if (tlb->prot & PAGE_VALID) {
        ret |= PPC4XX_TLBHI_V;
    }
    size = booke_page_size_to_tlb(tlb->size);
    if (size < PPC4XX_TLBHI_SIZE_MIN || size > PPC4XX_TLBHI_SIZE_MAX) {
        size = PPC4XX_TLBHI_SIZE_DEFAULT;
    }
    ret |= size << PPC4XX_TLBHI_SIZE_SHIFT;
    helper_store_40x_pid(env, tlb->PID);
    return ret;
}

target_ulong helper_4xx_tlbre_lo(CPUPPCState *env, target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;

    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    ret = tlb->RPN;
    if (tlb->prot & PAGE_EXEC) {
        ret |= PPC4XX_TLBLO_EX;
    }
    if (tlb->prot & PAGE_WRITE) {
        ret |= PPC4XX_TLBLO_WR;
    }
    return ret;
}

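/*
 * Flush the QEMU TLB range backed by one embedded (4xx/440) TLB entry.
 * The MMU indexes to flush are derived from which protection nibbles of
 * the entry are populated and from its address-space attribute bit.
 */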
static void ppcemb_tlb_flush(CPUState *cs, ppcemb_tlb_t *tlb)
{
    unsigned mmu_idx = 0;

    if (tlb->prot & 0xf) {
        mmu_idx |= 0x1;
    }
    if ((tlb->prot >> 4) & 0xf) {
        mmu_idx |= 0x2;
    }
    if (tlb->attr & 1) {
        mmu_idx <<= 2;
    }

    tlb_flush_range_by_mmuidx(cs, tlb->EPN, tlb->size, mmu_idx,
                              TARGET_LONG_BITS);
}

void helper_4xx_tlbwe_hi(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %d val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->size = booke_tlb_to_page_size((val >> PPC4XX_TLBHI_SIZE_SHIFT)
                                       & PPC4XX_TLBHI_SIZE_MASK);
    /*
     * We cannot handle TLB size < TARGET_PAGE_SIZE.
     * If this ever occurs, we should implement TARGET_PAGE_BITS_VARY
     */
    if ((val & PPC4XX_TLBHI_V) && tlb->size < TARGET_PAGE_SIZE) {
        cpu_abort(cs, "TLB size " TARGET_FMT_lu " < %u "
                  "are not supported (%d)\n"
                  "Please implement TARGET_PAGE_BITS_VARY\n",
                  tlb->size, TARGET_PAGE_SIZE, (int)((val >> 7) & 0x7));
    }
    tlb->EPN = val & ~(tlb->size - 1);
    if (val & PPC4XX_TLBHI_V) {
        tlb->prot |= PAGE_VALID;
        if (val & PPC4XX_TLBHI_E) {
            /* XXX: TO BE FIXED */
            cpu_abort(cs,
                      "Little-endian TLB entries are not supported by now\n");
        }
    } else {
        tlb->prot &= ~PAGE_VALID;
    }
    tlb->PID = env->spr[SPR_40x_PID]; /* PID */
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx " size " TARGET_FMT_lx
                  " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

void helper_4xx_tlbwe_lo(CPUPPCState *env, target_ulong entry,
                         target_ulong val)
{
    CPUState *cs = env_cpu(env);
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s entry %i val " TARGET_FMT_lx "\n",
                  __func__, (int)entry, val);
    entry &= PPC4XX_TLB_ENTRY_MASK;
    tlb = &env->tlb.tlbe[entry];
    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && tlb->PID == env->spr[SPR_40x_PID]) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(cs, tlb);
    }
    tlb->attr = val & PPC4XX_TLBLO_ATTR_MASK;
    tlb->RPN = val & PPC4XX_TLBLO_RPN_MASK;
    tlb->prot = PAGE_READ;
    if (val & PPC4XX_TLBLO_EX) {
        tlb->prot |= PAGE_EXEC;
    }
    if (val & PPC4XX_TLBLO_WR) {
        tlb->prot |= PAGE_WRITE;
    }
    qemu_log_mask(CPU_LOG_MMU, "%s: set up TLB %d RPN " HWADDR_FMT_plx
                  " EPN " TARGET_FMT_lx
                  " size " TARGET_FMT_lx " prot %c%c%c%c PID %d\n", __func__,
                  (int)entry, tlb->RPN, tlb->EPN, tlb->size,
                  tlb->prot & PAGE_READ ? 'r' : '-',
                  tlb->prot & PAGE_WRITE ? 'w' : '-',
                  tlb->prot & PAGE_EXEC ? 'x' : '-',
                  tlb->prot & PAGE_VALID ? 'v' : '-', (int)tlb->PID);
}

target_ulong helper_4xx_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_40x_PID]);
}

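/*
 * Check whether a BookE TLB entry's PID matches one of the currently
 * programmed PID registers (PID, and PID1/PID2 when the core implements
 * extra PID registers).
 */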
static bool mmubooke_pid_match(CPUPPCState *env, ppcemb_tlb_t *tlb)
{
    if (tlb->PID == env->spr[SPR_BOOKE_PID]) {
        return true;
    }
    if (!env->nb_pids) {
        return false;
    }

    if (env->spr[SPR_BOOKE_PID1] && tlb->PID == env->spr[SPR_BOOKE_PID1]) {
        return true;
    }
    if (env->spr[SPR_BOOKE_PID2] && tlb->PID == env->spr[SPR_BOOKE_PID2]) {
        return true;
    }

    return false;
}

/* PowerPC 440 TLB management */
void helper_440_tlbwe(CPUPPCState *env, uint32_t word, target_ulong entry,
                      target_ulong value)
{
    ppcemb_tlb_t *tlb;

    qemu_log_mask(CPU_LOG_MMU, "%s word %d entry %d value " TARGET_FMT_lx "\n",
                  __func__, word, (int)entry, value);
    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];

    /* Invalidate previous TLB (if it's valid) */
    if ((tlb->prot & PAGE_VALID) && mmubooke_pid_match(env, tlb)) {
        qemu_log_mask(CPU_LOG_MMU, "%s: invalidate old TLB %d start "
                      TARGET_FMT_lx " end " TARGET_FMT_lx "\n", __func__,
                      (int)entry, tlb->EPN, tlb->EPN + tlb->size);
        ppcemb_tlb_flush(env_cpu(env), tlb);
    }

    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        tlb->EPN = value & 0xFFFFFC00;
        tlb->size = booke_tlb_to_page_size((value >> 4) & 0xF);
        tlb->attr &= ~0x1;
        tlb->attr |= (value >> 8) & 1;
        if (value & 0x200) {
            tlb->prot |= PAGE_VALID;
        } else {
            tlb->prot &= ~PAGE_VALID;
        }
        tlb->PID = env->spr[SPR_440_MMUCR] & 0x000000FF;
        break;
    case 1:
        tlb->RPN = value & 0xFFFFFC0F;
        break;
    case 2:
        tlb->attr = (tlb->attr & 0x1) | (value & 0x0000FF00);
        tlb->prot = tlb->prot & PAGE_VALID;
        if (value & 0x1) {
            tlb->prot |= PAGE_READ << 4;
        }
        if (value & 0x2) {
            tlb->prot |= PAGE_WRITE << 4;
        }
        if (value & 0x4) {
            tlb->prot |= PAGE_EXEC << 4;
        }
        if (value & 0x8) {
            tlb->prot |= PAGE_READ;
        }
        if (value & 0x10) {
            tlb->prot |= PAGE_WRITE;
        }
        if (value & 0x20) {
            tlb->prot |= PAGE_EXEC;
        }
        break;
    }
}

target_ulong helper_440_tlbre(CPUPPCState *env, uint32_t word,
                              target_ulong entry)
{
    ppcemb_tlb_t *tlb;
    target_ulong ret;
    int size;

    entry &= 0x3F;
    tlb = &env->tlb.tlbe[entry];
    switch (word) {
    default:
        /* Just here to please gcc */
    case 0:
        ret = tlb->EPN;
        size = booke_page_size_to_tlb(tlb->size);
        if (size < 0 || size > 0xF) {
            size = 1;
        }
        ret |= size << 4;
        if (tlb->attr & 0x1) {
            ret |= 0x100;
        }
        if (tlb->prot & PAGE_VALID) {
            ret |= 0x200;
        }
        env->spr[SPR_440_MMUCR] &= ~0x000000FF;
        env->spr[SPR_440_MMUCR] |= tlb->PID;
        break;
    case 1:
        ret = tlb->RPN;
        break;
    case 2:
        ret = tlb->attr & ~0x1;
        if (tlb->prot & (PAGE_READ << 4)) {
            ret |= 0x1;
        }
        if (tlb->prot & (PAGE_WRITE << 4)) {
            ret |= 0x2;
        }
        if (tlb->prot & (PAGE_EXEC << 4)) {
            ret |= 0x4;
        }
        if (tlb->prot & PAGE_READ) {
            ret |= 0x8;
        }
        if (tlb->prot & PAGE_WRITE) {
            ret |= 0x10;
        }
        if (tlb->prot & PAGE_EXEC) {
            ret |= 0x20;
        }
        break;
    }
    return ret;
}

target_ulong helper_440_tlbsx(CPUPPCState *env, target_ulong address)
{
    return ppcemb_tlb_search(env, address, env->spr[SPR_440_MMUCR] & 0xFF);
}

/* PowerPC BookE 2.06 TLB management */

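/*
 * Return the TLB entry currently addressed by MAS0[TLBSEL,ESEL] and
 * MAS2[EPN], or NULL if the selection is out of range.
 */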
static ppcmas_tlb_t *booke206_cur_tlb(CPUPPCState *env)
{
    uint32_t tlbncfg = 0;
    int esel = (env->spr[SPR_BOOKE_MAS0] & MAS0_ESEL_MASK) >> MAS0_ESEL_SHIFT;
    int ea = (env->spr[SPR_BOOKE_MAS2] & MAS2_EPN_MASK);
    int tlb;

    tlb = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlb];

    if ((tlbncfg & TLBnCFG_HES) && (env->spr[SPR_BOOKE_MAS0] & MAS0_HES)) {
        cpu_abort(env_cpu(env), "we don't support HES yet\n");
    }

    return booke206_get_tlbm(env, tlb, ea, esel);
}

void helper_booke_setpid(CPUPPCState *env, uint32_t pidn, target_ulong pid)
{
    env->spr[pidn] = pid;
    /* changing PIDs means we're in a different address space now */
    tlb_flush(env_cpu(env));
}

void helper_booke_set_eplc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPLC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_LOAD);
}

void helper_booke_set_epsc(CPUPPCState *env, target_ulong val)
{
    env->spr[SPR_BOOKE_EPSC] = val & EPID_MASK;
    tlb_flush_by_mmuidx(env_cpu(env), 1 << PPC_TLB_EPID_STORE);
}

static inline void flush_page(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    if (booke206_tlb_to_page_size(env, tlb) == TARGET_PAGE_SIZE) {
        tlb_flush_page(env_cpu(env), tlb->mas2 & MAS2_EPN_MASK);
    } else {
        tlb_flush(env_cpu(env));
    }
}

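/*
 * tlbwe: write the entry selected by MAS0/MAS2 from the MAS registers.
 * The helper honours the MAS0[WQ] write qualifier, rejects unsupported
 * page sizes and LRAT/HV cases, invalidates the victim entry, then
 * installs the new MAS1/MAS2/MAS7_3 contents and flushes the page.
 */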
void helper_booke206_tlbwe(CPUPPCState *env)
{
    uint32_t tlbncfg, tlbn;
    ppcmas_tlb_t *tlb;
    uint32_t size_tlb, size_ps;
    target_ulong mask;

    switch (env->spr[SPR_BOOKE_MAS0] & MAS0_WQ_MASK) {
    case MAS0_WQ_ALWAYS:
        /* good to go, write that entry */
        break;
    case MAS0_WQ_COND:
        /* XXX check if reserved */
        if (0) {
            return;
        }
        break;
    case MAS0_WQ_CLR_RSRV:
        /* XXX clear entry */
        return;
    default:
        /* no idea what to do */
        return;
    }

    if (((env->spr[SPR_BOOKE_MAS0] & MAS0_ATSEL) == MAS0_ATSEL_LRAT) &&
        !FIELD_EX64(env->msr, MSR, GS)) {
        /* XXX we don't support direct LRAT setting yet */
        fprintf(stderr, "cpu: don't support LRAT setting yet\n");
        return;
    }

    tlbn = (env->spr[SPR_BOOKE_MAS0] & MAS0_TLBSEL_MASK) >> MAS0_TLBSEL_SHIFT;
    tlbncfg = env->spr[SPR_BOOKE_TLB0CFG + tlbn];

    tlb = booke206_cur_tlb(env);

    if (!tlb) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    /* check that we support the targeted size */
    size_tlb = (env->spr[SPR_BOOKE_MAS1] & MAS1_TSIZE_MASK) >> MAS1_TSIZE_SHIFT;
    size_ps = booke206_tlbnps(env, tlbn);
    if ((env->spr[SPR_BOOKE_MAS1] & MAS1_VALID) && (tlbncfg & TLBnCFG_AVAIL) &&
        !(size_ps & (1 << size_tlb))) {
        raise_exception_err_ra(env, POWERPC_EXCP_PROGRAM,
                               POWERPC_EXCP_INVAL |
                               POWERPC_EXCP_INVAL_INVAL, GETPC());
    }

    if (FIELD_EX64(env->msr, MSR, GS)) {
        cpu_abort(env_cpu(env), "missing HV implementation\n");
    }

    if (tlb->mas1 & MAS1_VALID) {
        /*
         * Invalidate the page in QEMU TLB if it was a valid entry.
         *
         * In "PowerPC e500 Core Family Reference Manual, Rev. 1",
         * Section "12.4.2 TLB Write Entry (tlbwe) Instruction":
         * (https://www.nxp.com/docs/en/reference-manual/E500CORERM.pdf)
         *
         * "Note that when an L2 TLB entry is written, it may be displacing an
         * already valid entry in the same L2 TLB location (a victim). If a
         * valid L1 TLB entry corresponds to the L2 MMU victim entry, that L1
         * TLB entry is automatically invalidated."
         */
        flush_page(env, tlb);
    }

    tlb->mas7_3 = ((uint64_t)env->spr[SPR_BOOKE_MAS7] << 32) |
                  env->spr[SPR_BOOKE_MAS3];
    tlb->mas1 = env->spr[SPR_BOOKE_MAS1];

    if ((env->spr[SPR_MMUCFG] & MMUCFG_MAVN) == MMUCFG_MAVN_V2) {
        /* For TLB which has a fixed size TSIZE is ignored with MAV2 */
        booke206_fixed_size_tlbn(env, tlbn, tlb);
    } else {
        if (!(tlbncfg & TLBnCFG_AVAIL)) {
            /* force !AVAIL TLB entries to correct page size */
            tlb->mas1 &= ~MAS1_TSIZE_MASK;
            /* XXX can be configured in MMUCSR0 */
            tlb->mas1 |= (tlbncfg & TLBnCFG_MINSIZE) >> 12;
        }
    }

    /* Make a mask from TLB size to discard invalid bits in EPN field */
    mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
    /* Add a mask for page attributes */
    mask |= MAS2_ACM | MAS2_VLE | MAS2_W | MAS2_I | MAS2_M | MAS2_G | MAS2_E;

    if (!FIELD_EX64(env->msr, MSR, CM)) {
        /*
         * Executing a tlbwe instruction in 32-bit mode will set bits
         * 0:31 of the TLB EPN field to zero.
         */
        mask &= 0xffffffff;
    }

    tlb->mas2 = env->spr[SPR_BOOKE_MAS2] & mask;

    if (!(tlbncfg & TLBnCFG_IPROT)) {
        /* no IPROT supported by TLB */
        tlb->mas1 &= ~MAS1_IPROT;
    }

    flush_page(env, tlb);
}

static inline void booke206_tlb_to_mas(CPUPPCState *env, ppcmas_tlb_t *tlb)
{
    int tlbn = booke206_tlbm_to_tlbn(env, tlb);
    int way = booke206_tlbm_to_way(env, tlb);

    env->spr[SPR_BOOKE_MAS0] = tlbn << MAS0_TLBSEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= way << MAS0_ESEL_SHIFT;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;

    env->spr[SPR_BOOKE_MAS1] = tlb->mas1;
    env->spr[SPR_BOOKE_MAS2] = tlb->mas2;
    env->spr[SPR_BOOKE_MAS3] = tlb->mas7_3;
    env->spr[SPR_BOOKE_MAS7] = tlb->mas7_3 >> 32;
}

void helper_booke206_tlbre(CPUPPCState *env)
{
    ppcmas_tlb_t *tlb = NULL;

    tlb = booke206_cur_tlb(env);
    if (!tlb) {
        env->spr[SPR_BOOKE_MAS1] = 0;
    } else {
        booke206_tlb_to_mas(env, tlb);
    }
}

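/*
 * tlbsx: search every TLB array and way for an entry matching the given
 * address, SPID and SAS; on a hit the entry is copied into the MAS
 * registers, otherwise they are filled with the MAS4 defaults and the
 * next-victim hint.
 */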
void helper_booke206_tlbsx(CPUPPCState *env, target_ulong address)
{
    ppcmas_tlb_t *tlb = NULL;
    int i, j;
    hwaddr raddr;
    uint32_t spid, sas;

    spid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID_MASK) >> MAS6_SPID_SHIFT;
    sas = env->spr[SPR_BOOKE_MAS6] & MAS6_SAS;

    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);

            if (!tlb) {
                continue;
            }

            if (ppcmas_tlb_check(env, tlb, &raddr, address, spid)) {
                continue;
            }

            if (sas != ((tlb->mas1 & MAS1_TS) >> MAS1_TS_SHIFT)) {
                continue;
            }

            booke206_tlb_to_mas(env, tlb);
            return;
        }
    }

    /* no entry found, fill with defaults */
    env->spr[SPR_BOOKE_MAS0] = env->spr[SPR_BOOKE_MAS4] & MAS4_TLBSELD_MASK;
    env->spr[SPR_BOOKE_MAS1] = env->spr[SPR_BOOKE_MAS4] & MAS4_TSIZED_MASK;
    env->spr[SPR_BOOKE_MAS2] = env->spr[SPR_BOOKE_MAS4] & MAS4_WIMGED_MASK;
    env->spr[SPR_BOOKE_MAS3] = 0;
    env->spr[SPR_BOOKE_MAS7] = 0;

    if (env->spr[SPR_BOOKE_MAS6] & MAS6_SAS) {
        env->spr[SPR_BOOKE_MAS1] |= MAS1_TS;
    }

    env->spr[SPR_BOOKE_MAS1] |= (env->spr[SPR_BOOKE_MAS6] >> 16)
                                << MAS1_TID_SHIFT;

    /* next victim logic */
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_ESEL_SHIFT;
    env->last_way++;
    env->last_way &= booke206_tlb_ways(env, 0) - 1;
    env->spr[SPR_BOOKE_MAS0] |= env->last_way << MAS0_NV_SHIFT;
}

static inline void booke206_invalidate_ea_tlb(CPUPPCState *env, int tlbn,
                                              vaddr ea)
{
    int i;
    int ways = booke206_tlb_ways(env, tlbn);
    target_ulong mask;

    for (i = 0; i < ways; i++) {
        ppcmas_tlb_t *tlb = booke206_get_tlbm(env, tlbn, ea, i);
        if (!tlb) {
            continue;
        }
        mask = ~(booke206_tlb_to_page_size(env, tlb) - 1);
        if (((tlb->mas2 & MAS2_EPN_MASK) == (ea & mask)) &&
            !(tlb->mas1 & MAS1_IPROT)) {
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
}

void helper_booke206_tlbivax(CPUPPCState *env, target_ulong address)
{
    CPUState *cs;

    if (address & 0x4) {
        /* flush all entries */
        if (address & 0x8) {
            /* flush all of TLB1 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB1, 1);
        } else {
            /* flush all of TLB0 */
            booke206_flush_tlb(env, BOOKE206_FLUSH_TLB0, 0);
        }
        return;
    }

    if (address & 0x8) {
        /* flush TLB1 entries */
        booke206_invalidate_ea_tlb(env, 1, address);
        CPU_FOREACH(cs) {
            tlb_flush(cs);
        }
    } else {
        /* flush TLB0 entries */
        booke206_invalidate_ea_tlb(env, 0, address);
        CPU_FOREACH(cs) {
            tlb_flush_page(cs, address & MAS2_EPN_MASK);
        }
    }
}

void helper_booke206_tlbilx0(CPUPPCState *env, target_ulong address)
{
    /* XXX missing LPID handling */
    booke206_flush_tlb(env, -1, 1);
}

void helper_booke206_tlbilx1(CPUPPCState *env, target_ulong address)
{
    int i, j;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    ppcmas_tlb_t *tlb = env->tlb.tlbm;
    int tlb_size;

    /* XXX missing LPID handling */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        tlb_size = booke206_tlb_size(env, i);
        for (j = 0; j < tlb_size; j++) {
            if (!(tlb[j].mas1 & MAS1_IPROT) &&
                ((tlb[j].mas1 & MAS1_TID_MASK) == tid)) {
                tlb[j].mas1 &= ~MAS1_VALID;
            }
        }
        tlb += booke206_tlb_size(env, i);
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbilx3(CPUPPCState *env, target_ulong address)
{
    int i, j;
    ppcmas_tlb_t *tlb;
    int tid = (env->spr[SPR_BOOKE_MAS6] & MAS6_SPID);
    int pid = tid >> MAS6_SPID_SHIFT;
    int sgs = env->spr[SPR_BOOKE_MAS5] & MAS5_SGS;
    int ind = (env->spr[SPR_BOOKE_MAS6] & MAS6_SIND) ? MAS1_IND : 0;
    /* XXX: check for unsupported isize and raise an invalid opcode if so */
    int size = env->spr[SPR_BOOKE_MAS6] & MAS6_ISIZE_MASK;
    /* XXX implement MAV2 handling */
    bool mav2 = false;

    /* XXX missing LPID handling */
    /* flush by pid and ea */
    for (i = 0; i < BOOKE206_MAX_TLBN; i++) {
        int ways = booke206_tlb_ways(env, i);

        for (j = 0; j < ways; j++) {
            tlb = booke206_get_tlbm(env, i, address, j);
            if (!tlb) {
                continue;
            }
            if ((ppcmas_tlb_check(env, tlb, NULL, address, pid) != 0) ||
                (tlb->mas1 & MAS1_IPROT) ||
                ((tlb->mas1 & MAS1_IND) != ind) ||
                ((tlb->mas8 & MAS8_TGS) != sgs)) {
                continue;
            }
            if (mav2 && ((tlb->mas1 & MAS1_TSIZE_MASK) != size)) {
                /* XXX only check when MMUCFG[TWC] || TLBnCFG[HES] */
                continue;
            }
            /* XXX e500mc doesn't match SAS, but other cores might */
            tlb->mas1 &= ~MAS1_VALID;
        }
    }
    tlb_flush(env_cpu(env));
}

void helper_booke206_tlbflush(CPUPPCState *env, target_ulong type)
{
    int flags = 0;

    if (type & 2) {
        flags |= BOOKE206_FLUSH_TLB1;
    }

    if (type & 4) {
        flags |= BOOKE206_FLUSH_TLB0;
    }

    booke206_flush_tlb(env, flags, 1);
}

void helper_check_tlb_flush_local(CPUPPCState *env)
{
    check_tlb_flush(env, false);
}

void helper_check_tlb_flush_global(CPUPPCState *env)
{
    check_tlb_flush(env, true);
}

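/*
 * TCG tlb_fill hook: translate the access with ppc_xlate() and install the
 * result in the QEMU TLB.  On failure, return false for probe accesses,
 * otherwise deliver the exception that the translation code recorded in
 * cs->exception_index / error_code.
 */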
bool ppc_cpu_tlb_fill(CPUState *cs, vaddr eaddr, int size,
                      MMUAccessType access_type, int mmu_idx,
                      bool probe, uintptr_t retaddr)
{
    PowerPCCPU *cpu = POWERPC_CPU(cs);
    hwaddr raddr;
    int page_size, prot;

    if (ppc_xlate(cpu, eaddr, access_type, &raddr,
                  &page_size, &prot, mmu_idx, !probe)) {
        tlb_set_page(cs, eaddr & TARGET_PAGE_MASK, raddr & TARGET_PAGE_MASK,
                     prot, mmu_idx, 1UL << page_size);
        return true;
    }
    if (probe) {
        return false;
    }
    raise_exception_err_ra(&cpu->env, cs->exception_index,
                           cpu->env.error_code, retaddr);
}