/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}
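
/*
 * Worked example of the mask arithmetic above (illustrative values only;
 * CPU_TLB_ENTRY_BITS depends on the host/target build).  With 256 entries
 * and 32-byte CPUTLBEntry slots (CPU_TLB_ENTRY_BITS == 5):
 *
 *   fast->mask      = (256 - 1) << 5 = 0x1fe0
 *   tlb_n_entries() = (0x1fe0 >> 5) + 1 = 256
 *   sizeof_tlb()    = 0x1fe0 + (1 << 5) = 0x2000 (8 KiB)
 *
 * i.e. fast->mask is both the byte offset of the last entry and the mask
 * the fast path applies to a shifted virtual address to obtain a byte
 * offset into the table.
 */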

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}
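
/*
 * Note on the two calls above: a translation block may begin on the page
 * preceding @addr and extend into it (a TB can span at most two pages),
 * so the jump-cache buckets for both pages are discarded.
 */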

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_c.lock held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}
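
/*
 * Worked example of the sizing policy above (invented numbers; the real
 * bounds come from CPU_TLB_DYN_MIN_BITS/CPU_TLB_DYN_MAX_BITS):
 *
 *   old_size = 1024, window_max_entries = 800
 *     -> rate = 800 * 100 / 1024 = 78 > 70, so the TLB doubles to 2048.
 *
 *   old_size = 4096, window_max_entries = 900, window expired
 *     -> rate = 900 * 100 / 4096 = 21 < 30, so we shrink:
 *        ceil = pow2ceil(900) = 1024, expected_rate = 900 * 100 / 1024 = 87,
 *        which exceeds 70, so ceil is doubled and the TLB is resized to
 *        2048 entries (a ~44% expected use rate, back in the 30-70% band).
 */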

static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * Queue fn as asynchronous work on every cpu other than src.  The caller
 * decides how fn is run on src itself; the *_synced flush variants queue
 * it there as "safe" work, which creates a synchronisation point where
 * all queued work is finished before execution starts again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}
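
/*
 * Illustration of the dirty-set bookkeeping above (values invented for the
 * example): with asked = 0b0110 and c.dirty = 0b0011, only mmu_idx 1 is
 * both requested and dirty, so to_clean = 0b0010 and the loop flushes just
 * that index.  c.dirty becomes 0b0001, the partial-flush counter is
 * credited with one flush, and the elide counter with the one requested
 * but already-clean index (bit 2).
 */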

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The idxmap parameter is encoded in the page
 * offset of the target_ptr field. This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu. The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper. Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx. In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}
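
/*
 * Example of the addr|idxmap encoding used above (illustrative, assuming
 * 4 KiB target pages, i.e. TARGET_PAGE_BITS == 12).  A page-aligned addr
 * has its low 12 bits clear, so any idxmap < 4096 fits there:
 *
 *   addr   = 0xffff8000, idxmap = 0x0005  (mmu_idx 0 and 2)
 *   packed = 0xffff8005
 *
 * tlb_flush_page_by_mmuidx_async_1 recovers addr as
 * packed & TARGET_PAGE_MASK and idxmap as packed & ~TARGET_PAGE_MASK,
 * which avoids a heap allocation for the common small-idxmap case.
 */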

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

static void tlb_flush_range_locked(CPUArchState *env, int midx,
                                   target_ulong addr, target_ulong len,
                                   unsigned bits)
{
    CPUTLBDesc *d = &env_tlb(env)->d[midx];
    CPUTLBDescFast *f = &env_tlb(env)->f[midx];
    target_ulong mask = MAKE_64BIT_MASK(0, bits);

    /*
     * If @bits is smaller than the tlb size, there may be multiple entries
     * within the TLB; otherwise all addresses that match under @mask hit
     * the same TLB entry.
     * TODO: Perhaps allow bits to be a few bits less than the size.
     * For now, just flush the entire TLB.
     *
     * If @len is larger than the tlb size, then it will take longer to
     * test all of the entries in the TLB than it will to flush it all.
     */
    if (mask < f->mask || len > f->mask) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n",
                  midx, addr, mask, len);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    /*
     * Check if we need to flush due to large pages.
     * Because large_page_mask contains all 1's from the msb,
     * we only need to test the end of the range.
     */
    if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, d->large_page_addr, d->large_page_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
        return;
    }

    for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) {
        target_ulong page = addr + i;
        CPUTLBEntry *entry = tlb_entry(env, midx, page);

        if (tlb_flush_entry_mask_locked(entry, page, mask)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_mask_locked(env, midx, page, mask);
    }
}
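
/*
 * Numeric illustration of the first test above (invented values, using a
 * 256-entry TLB with CPU_TLB_ENTRY_BITS == 5, so f->mask == 0x1fe0):
 *
 *   bits == 12 -> mask == 0xfff, which is < f->mask: matching entries may
 *   sit anywhere in the table and cannot be found by direct indexing, so
 *   the whole mmu_idx is flushed.
 *
 *   bits == 48 -> mask is far larger than f->mask and, provided
 *   len <= f->mask, each page in [addr, addr + len) is looked up and
 *   flushed individually by the loop above.
 */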

typedef struct {
    target_ulong addr;
    target_ulong len;
    uint16_t idxmap;
    uint16_t bits;
} TLBFlushRangeData;

static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu,
                                              TLBFlushRangeData d)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n",
              d.addr, d.bits, d.len, d.idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((d.idxmap >> mmu_idx) & 1) {
            tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    /*
     * If the length is larger than the jump cache size, then it will take
     * longer to clear each entry individually than it will to clear it all.
     */
    if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) {
        cpu_tb_jmp_cache_clear(cpu);
        return;
    }

    for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) {
        tb_flush_jmp_cache(cpu, d.addr + i);
    }
}

static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu,
                                              run_on_cpu_data data)
{
    TLBFlushRangeData *d = data.host_ptr;
    tlb_flush_range_by_mmuidx_async_0(cpu, *d);
    g_free(d);
}

void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr,
                               target_ulong len, uint16_t idxmap,
                               unsigned bits)
{
    TLBFlushRangeData d;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx(cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx(cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_range_by_mmuidx_async_0(cpu, d);
    } else {
        /* Otherwise allocate a structure, freed by the worker. */
        TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
        async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1,
                         RUN_ON_CPU_HOST_PTR(p));
    }
}
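
/*
 * Usage sketch (hypothetical caller, not taken from this file): a target
 * implementing a ranged TLB-invalidate operation might issue
 *
 *     tlb_flush_range_by_mmuidx(cs, start, num_pages * TARGET_PAGE_SIZE,
 *                               idxmap, 48);
 *
 * where "cs", "start", "num_pages" and "idxmap" are the caller's values
 * and 48 is the number of significant low address bits.  The two
 * early-return checks above turn degenerate calls into plain
 * tlb_flush_page_by_mmuidx or tlb_flush_by_mmuidx.
 */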

void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr,
                                   uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu,
                                        target_ulong addr, target_ulong len,
                                        uint16_t idxmap, unsigned bits)
{
    TLBFlushRangeData d;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu. */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            TLBFlushRangeData *p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu,
                             tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    tlb_flush_range_by_mmuidx_async_0(src_cpu, d);
}

void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu,
                                            target_ulong addr,
                                            uint16_t idxmap, unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE,
                                       idxmap, bits);
}

void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                               target_ulong addr,
                                               target_ulong len,
                                               uint16_t idxmap,
                                               unsigned bits)
{
    TLBFlushRangeData d, *p;
    CPUState *dst_cpu;

    /*
     * If all bits are significant, and len is small,
     * this devolves to tlb_flush_page.
     */
    if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) {
        tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap);
        return;
    }
    /* If no page bits are significant, this devolves to tlb_flush. */
    if (bits < TARGET_PAGE_BITS) {
        tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap);
        return;
    }

    /* This should already be page aligned */
    d.addr = addr & TARGET_PAGE_MASK;
    d.len = len;
    d.idxmap = idxmap;
    d.bits = bits;

    /* Allocate a separate data block for each destination cpu. */
    CPU_FOREACH(dst_cpu) {
        if (dst_cpu != src_cpu) {
            p = g_memdup(&d, sizeof(d));
            async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1,
                             RUN_ON_CPU_HOST_PTR(p));
        }
    }

    p = g_memdup(&d, sizeof(d));
    async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1,
                          RUN_ON_CPU_HOST_PTR(p));
}

void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                                   target_ulong addr,
                                                   uint16_t idxmap,
                                                   unsigned bits)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE,
                                              idxmap, bits);
}

/* update the TLBs so that writes to code in the virtual page 'addr'
   can be detected */
void tlb_protect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE,
                                             DIRTY_MEMORY_CODE);
}

/* update the TLB so that writes in physical page 'phys_addr' are no longer
   tested for self modifying code */
void tlb_unprotect_code(ram_addr_t ram_addr)
{
    cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE);
}


/*
 * Dirty write flag handling
 *
 * When the TCG code writes to a location it looks up the address in
 * the TLB and uses that data to compute the final address. If any of
 * the lower bits of the address are set then the slow path is forced.
 * There are a number of reasons to do this but for normal RAM the
 * most usual is detecting writes to code regions which may invalidate
 * generated code.
 *
 * Other vCPUs might be reading their TLBs during guest execution, so we update
 * te->addr_write with qatomic_set. We don't need to worry about this for
 * oversized guests as MTTCG is disabled for them.
 *
 * Called with tlb_c.lock held.
 */
static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry,
                                         uintptr_t start, uintptr_t length)
{
    uintptr_t addr = tlb_entry->addr_write;

    if ((addr & (TLB_INVALID_MASK | TLB_MMIO |
                 TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) {
        addr &= TARGET_PAGE_MASK;
        addr += tlb_entry->addend;
        if ((addr - start) < length) {
#if TCG_OVERSIZED_GUEST
            tlb_entry->addr_write |= TLB_NOTDIRTY;
#else
            qatomic_set(&tlb_entry->addr_write,
                        tlb_entry->addr_write | TLB_NOTDIRTY);
#endif
        }
    }
}
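
/*
 * In the helper above, an entry is only downgraded to TLB_NOTDIRTY when its
 * write address carries none of the slow-path flags (i.e. it currently maps
 * plain, dirty-tracked RAM) and its host address, addend + page, falls
 * inside [start, start + length).  Setting TLB_NOTDIRTY forces the next
 * guest write to that page through the slow path, where dirty tracking and
 * self-modifying-code detection are performed.
 */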

/*
 * Called with tlb_c.lock held.
 * Called only from the vCPU context, i.e. the TLB's owner thread.
 */
static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s)
{
    *d = *s;
}

/* This is a cross vCPU call (i.e. another vCPU resetting the flags of
 * the target vCPU).
 * We must take tlb_c.lock to avoid racing with another vCPU update. The only
 * thing actually updated is the target TLB entry ->addr_write flags.
 */
void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length)
{
    CPUArchState *env;

    int mmu_idx;

    env = cpu->env_ptr;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        unsigned int i;
        unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]);

        for (i = 0; i < n; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i],
                                         start1, length);
        }

        for (i = 0; i < CPU_VTLB_SIZE; i++) {
            tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i],
                                         start1, length);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Called with tlb_c.lock held */
static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry,
                                         target_ulong vaddr)
{
    if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) {
        tlb_entry->addr_write = vaddr;
    }
}

/* update the TLB corresponding to virtual page vaddr
   so that it is no longer dirty */
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    vaddr &= TARGET_PAGE_MASK;
    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr);
    }

    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        int k;
        for (k = 0; k < CPU_VTLB_SIZE; k++) {
            tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);
}

/* Our TLB does not support large pages, so remember the area covered by
   large pages and trigger a full TLB flush if these are invalidated. */
static void tlb_add_large_page(CPUArchState *env, int mmu_idx,
                               target_ulong vaddr, target_ulong size)
{
    target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr;
    target_ulong lp_mask = ~(size - 1);

    if (lp_addr == (target_ulong)-1) {
        /* No previous large page. */
        lp_addr = vaddr;
    } else {
        /* Extend the existing region to include the new page.
           This is a compromise between unnecessary flushes and
           the cost of maintaining a full variable size TLB. */
        lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask;
        while (((lp_addr ^ vaddr) & lp_mask) != 0) {
            lp_mask <<= 1;
        }
    }
    env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask;
    env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask;
}
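
/*
 * Worked example of the mask widening above (illustrative addresses, low
 * 32 bits shown): with an existing 2 MiB large page recorded at
 * lp_addr == 0x40000000 (lp_mask == 0xffe00000) and a new 2 MiB page at
 * vaddr == 0x40400000, the loop shifts lp_mask left until both addresses
 * agree under it, ending at lp_mask == 0xff800000.  The recorded region
 * then spans 0x40000000-0x407fffff, and tlb_flush_page_locked() on any
 * address inside it falls back to a full flush of that mmu_idx.
 */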

/* Add a new TLB entry. At most one entry for a given virtual address
 * is permitted. Only a single TARGET_PAGE_SIZE region is mapped, the
 * supplied size is only used by tlb_flush_page.
 *
 * Called from TCG-generated code, which is under an RCU read-side
 * critical section.
 */
void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr,
                             hwaddr paddr, MemTxAttrs attrs, int prot,
                             int mmu_idx, target_ulong size)
{
    CPUArchState *env = cpu->env_ptr;
    CPUTLB *tlb = env_tlb(env);
    CPUTLBDesc *desc = &tlb->d[mmu_idx];
    MemoryRegionSection *section;
    unsigned int index;
    target_ulong address;
    target_ulong write_address;
    uintptr_t addend;
    CPUTLBEntry *te, tn;
    hwaddr iotlb, xlat, sz, paddr_page;
    target_ulong vaddr_page;
    int asidx = cpu_asidx_from_attrs(cpu, attrs);
    int wp_flags;
    bool is_ram, is_romd;

    assert_cpu_is_self(cpu);

    if (size <= TARGET_PAGE_SIZE) {
        sz = TARGET_PAGE_SIZE;
    } else {
        tlb_add_large_page(env, mmu_idx, vaddr, size);
        sz = size;
    }
    vaddr_page = vaddr & TARGET_PAGE_MASK;
    paddr_page = paddr & TARGET_PAGE_MASK;

    section = address_space_translate_for_iotlb(cpu, asidx, paddr_page,
                                                &xlat, &sz, attrs, &prot);
    assert(sz >= TARGET_PAGE_SIZE);

    tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx
              " prot=%x idx=%d\n",
              vaddr, paddr, prot, mmu_idx);

    address = vaddr_page;
    if (size < TARGET_PAGE_SIZE) {
        /* Repeat the MMU check and TLB fill on every access. */
        address |= TLB_INVALID_MASK;
    }
    if (attrs.byte_swap) {
        address |= TLB_BSWAP;
    }

    is_ram = memory_region_is_ram(section->mr);
    is_romd = memory_region_is_romd(section->mr);

    if (is_ram || is_romd) {
        /* RAM and ROMD both have associated host memory. */
        addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat;
    } else {
        /* I/O does not; force the host address to NULL. */
        addend = 0;
    }

    write_address = address;
    if (is_ram) {
        iotlb = memory_region_get_ram_addr(section->mr) + xlat;
        /*
         * Computing is_clean is expensive; avoid all that unless
         * the page is actually writable.
         */
        if (prot & PAGE_WRITE) {
            if (section->readonly) {
                write_address |= TLB_DISCARD_WRITE;
            } else if (cpu_physical_memory_is_clean(iotlb)) {
                write_address |= TLB_NOTDIRTY;
            }
        }
    } else {
        /* I/O or ROMD */
        iotlb = memory_region_section_get_iotlb(cpu, section) + xlat;
        /*
         * Writes to romd devices must go through MMIO to enable write.
         * Reads to romd devices go through the ram_ptr found above,
         * but of course reads to I/O must go through MMIO.
         */
        write_address |= TLB_MMIO;
        if (!is_romd) {
            address = write_address;
        }
    }

    wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page,
                                              TARGET_PAGE_SIZE);

    index = tlb_index(env, mmu_idx, vaddr_page);
    te = tlb_entry(env, mmu_idx, vaddr_page);

    /*
     * Hold the TLB lock for the rest of the function. We could acquire/release
     * the lock several times in the function, but it is faster to amortize the
     * acquisition cost by acquiring it just once. Note that this leads to
     * a longer critical section, but this is not a concern since the TLB lock
     * is unlikely to be contended.
     */
    qemu_spin_lock(&tlb->c.lock);

    /* Note that the tlb is no longer clean. */
    tlb->c.dirty |= 1 << mmu_idx;

    /* Make sure there's no cached translation for the new page. */
    tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page);

    /*
     * Only evict the old entry to the victim tlb if it's for a
     * different page; otherwise just overwrite the stale data.
     */
    if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) {
        unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE;
        CPUTLBEntry *tv = &desc->vtable[vidx];

        /* Evict the old entry into the victim tlb. */
        copy_tlb_helper_locked(tv, te);
        desc->viotlb[vidx] = desc->iotlb[index];
Emilio G. Cota86e1eff2019-01-16 12:01:13 -05001223 tlb_n_used_entries_dec(env, mmu_idx);
Richard Henderson68fea032018-06-29 13:07:08 -07001224 }
Xin Tong88e89a52014-08-04 20:35:23 -05001225
1226 /* refill the tlb */
Peter Maydellace41092018-06-15 14:57:14 +01001227 /*
1228 * At this point iotlb contains a physical section number in the lower
1229 * TARGET_PAGE_BITS, and either
Richard Henderson8f5db642019-09-19 21:09:58 -07001230 * + the ram_addr_t of the page base of the target RAM (RAM)
1231 * + the offset within section->mr of the page base (I/O, ROMD)
Peter Maydell55df6fc2018-06-26 17:50:41 +01001232 * We subtract the vaddr_page (which is page aligned and thus won't
Peter Maydellace41092018-06-15 14:57:14 +01001233 * disturb the low bits) to give an offset which can be added to the
1234 * (non-page-aligned) vaddr of the eventual memory access to get
1235 * the MemoryRegion offset for the access. Note that the vaddr we
1236 * subtract here is that of the page base, and not the same as the
1237 * vaddr we add back in io_readx()/io_writex()/get_page_addr_code().
1238 */
Richard Hendersona40ec842019-03-22 13:52:09 -07001239 desc->iotlb[index].addr = iotlb - vaddr_page;
1240 desc->iotlb[index].attrs = attrs;
Alex Bennéeb0706b72017-02-23 18:29:21 +00001241
1242 /* Now calculate the new entry */
Peter Maydell55df6fc2018-06-26 17:50:41 +01001243 tn.addend = addend - vaddr_page;
Blue Swirl0cac1b62012-04-09 16:50:52 +00001244 if (prot & PAGE_READ) {
Alex Bennéeb0706b72017-02-23 18:29:21 +00001245 tn.addr_read = address;
Richard Henderson50b107c2019-08-24 09:51:09 -07001246 if (wp_flags & BP_MEM_READ) {
1247 tn.addr_read |= TLB_WATCHPOINT;
1248 }
Blue Swirl0cac1b62012-04-09 16:50:52 +00001249 } else {
Alex Bennéeb0706b72017-02-23 18:29:21 +00001250 tn.addr_read = -1;
Blue Swirl0cac1b62012-04-09 16:50:52 +00001251 }
1252
1253 if (prot & PAGE_EXEC) {
Richard Henderson8f5db642019-09-19 21:09:58 -07001254 tn.addr_code = address;
Blue Swirl0cac1b62012-04-09 16:50:52 +00001255 } else {
Alex Bennéeb0706b72017-02-23 18:29:21 +00001256 tn.addr_code = -1;
Blue Swirl0cac1b62012-04-09 16:50:52 +00001257 }
Alex Bennéeb0706b72017-02-23 18:29:21 +00001258
1259 tn.addr_write = -1;
Blue Swirl0cac1b62012-04-09 16:50:52 +00001260 if (prot & PAGE_WRITE) {
Richard Henderson8f5db642019-09-19 21:09:58 -07001261 tn.addr_write = write_address;
David Hildenbrandf52bfb12017-10-16 22:23:57 +02001262 if (prot & PAGE_WRITE_INV) {
1263 tn.addr_write |= TLB_INVALID_MASK;
1264 }
Richard Henderson50b107c2019-08-24 09:51:09 -07001265 if (wp_flags & BP_MEM_WRITE) {
1266 tn.addr_write |= TLB_WATCHPOINT;
1267 }
Blue Swirl0cac1b62012-04-09 16:50:52 +00001268 }
Alex Bennéeb0706b72017-02-23 18:29:21 +00001269
Emilio G. Cota71aec352018-10-09 13:45:56 -04001270 copy_tlb_helper_locked(te, &tn);
Emilio G. Cota86e1eff2019-01-16 12:01:13 -05001271 tlb_n_used_entries_inc(env, mmu_idx);
Richard Hendersona40ec842019-03-22 13:52:09 -07001272 qemu_spin_unlock(&tlb->c.lock);
Blue Swirl0cac1b62012-04-09 16:50:52 +00001273}
1274
Peter Maydellfadc1cb2015-04-26 16:49:24 +01001275/* Add a new TLB entry, but without specifying the memory
1276 * transaction attributes to be used.
1277 */
1278void tlb_set_page(CPUState *cpu, target_ulong vaddr,
1279 hwaddr paddr, int prot,
1280 int mmu_idx, target_ulong size)
1281{
1282 tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED,
1283 prot, mmu_idx, size);
1284}
1285
Alex Bennée857baec2017-02-23 18:29:17 +00001286static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr)
1287{
1288 ram_addr_t ram_addr;
1289
1290 ram_addr = qemu_ram_addr_from_host(ptr);
1291 if (ram_addr == RAM_ADDR_INVALID) {
1292 error_report("Bad ram pointer %p", ptr);
1293 abort();
1294 }
1295 return ram_addr;
1296}
1297
Richard Hendersonc319dc12019-04-03 09:07:11 +07001298/*
1299 * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the
1300 * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must
1301 * be discarded and looked up again (e.g. via tlb_entry()).
1302 */
1303static void tlb_fill(CPUState *cpu, target_ulong addr, int size,
1304 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1305{
1306 CPUClass *cc = CPU_GET_CLASS(cpu);
1307 bool ok;
1308
1309 /*
 1310 * This is not a probe, so the only valid return is success; failure
 1311 * should result in an exception + longjmp to the cpu loop.
1312 */
Claudio Fontana78271682021-02-04 17:39:23 +01001313 ok = cc->tcg_ops->tlb_fill(cpu, addr, size,
1314 access_type, mmu_idx, false, retaddr);
Richard Hendersonc319dc12019-04-03 09:07:11 +07001315 assert(ok);
1316}
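
/*
 * Illustrative sketch (not part of this file): what a target's
 * tcg_ops->tlb_fill hook typically looks like from the other side of the
 * call above.  walk_page_table() is a hypothetical stand-in for the
 * target's page-table walker; the other functions are the real APIs used
 * elsewhere in this file.
 */
static bool example_tlb_fill(CPUState *cs, vaddr addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    hwaddr paddr;
    int prot;

    /* Hypothetical target-specific translation of addr. */
    if (!walk_page_table(cs, addr, access_type, mmu_idx, &paddr, &prot)) {
        if (probe) {
            /* Non-faulting probe: report failure without an exception. */
            return false;
        }
        /* Raise the guest fault and longjmp back to the cpu loop. */
        cpu_loop_exit_restore(cs, retaddr);
    }
    tlb_set_page_with_attrs(cs, addr & TARGET_PAGE_MASK,
                            paddr & TARGET_PAGE_MASK,
                            MEMTXATTRS_UNSPECIFIED, prot,
                            mmu_idx, TARGET_PAGE_SIZE);
    return true;
}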
1317
Claudio Fontana78271682021-02-04 17:39:23 +01001318static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr,
1319 MMUAccessType access_type,
1320 int mmu_idx, uintptr_t retaddr)
1321{
1322 CPUClass *cc = CPU_GET_CLASS(cpu);
1323
1324 cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr);
1325}
1326
1327static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr,
1328 vaddr addr, unsigned size,
1329 MMUAccessType access_type,
1330 int mmu_idx, MemTxAttrs attrs,
1331 MemTxResult response,
1332 uintptr_t retaddr)
1333{
1334 CPUClass *cc = CPU_GET_CLASS(cpu);
1335
1336 if (!cpu->ignore_memory_transaction_failures &&
1337 cc->tcg_ops->do_transaction_failed) {
1338 cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size,
1339 access_type, mmu_idx, attrs,
1340 response, retaddr);
1341 }
1342}
1343
Richard Henderson82a45b92016-07-08 18:51:28 -07001344static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
Richard Hendersonf1be3692019-04-25 14:16:34 -07001345 int mmu_idx, target_ulong addr, uintptr_t retaddr,
Tony Nguyenbe5c4782019-08-24 04:36:53 +10001346 MMUAccessType access_type, MemOp op)
Richard Henderson82a45b92016-07-08 18:51:28 -07001347{
Richard Henderson29a0af62019-03-22 16:07:18 -07001348 CPUState *cpu = env_cpu(env);
Peter Maydell2d54f192018-06-15 14:57:14 +01001349 hwaddr mr_offset;
1350 MemoryRegionSection *section;
1351 MemoryRegion *mr;
Richard Henderson82a45b92016-07-08 18:51:28 -07001352 uint64_t val;
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001353 bool locked = false;
Peter Maydell04e3aab2017-09-04 15:21:55 +01001354 MemTxResult r;
Richard Henderson82a45b92016-07-08 18:51:28 -07001355
Peter Maydell2d54f192018-06-15 14:57:14 +01001356 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1357 mr = section->mr;
1358 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
Richard Henderson82a45b92016-07-08 18:51:28 -07001359 cpu->mem_io_pc = retaddr;
Richard Henderson08565552019-09-18 09:15:44 -07001360 if (!cpu->can_do_io) {
Richard Henderson82a45b92016-07-08 18:51:28 -07001361 cpu_io_recompile(cpu, retaddr);
1362 }
1363
Philippe Mathieu-Daudé41744952020-08-06 17:07:26 +02001364 if (!qemu_mutex_iothread_locked()) {
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001365 qemu_mutex_lock_iothread();
1366 locked = true;
1367 }
Tony Nguyenbe5c4782019-08-24 04:36:53 +10001368 r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs);
Peter Maydell04e3aab2017-09-04 15:21:55 +01001369 if (r != MEMTX_OK) {
Peter Maydell2d54f192018-06-15 14:57:14 +01001370 hwaddr physaddr = mr_offset +
1371 section->offset_within_address_space -
1372 section->offset_within_region;
1373
Tony Nguyenbe5c4782019-08-24 04:36:53 +10001374 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type,
Peter Maydell04e3aab2017-09-04 15:21:55 +01001375 mmu_idx, iotlbentry->attrs, r, retaddr);
1376 }
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001377 if (locked) {
1378 qemu_mutex_unlock_iothread();
1379 }
1380
Richard Henderson82a45b92016-07-08 18:51:28 -07001381 return val;
1382}
1383
Alex Bennée2f3a57e2020-07-13 21:04:10 +01001384/*
1385 * Save a potentially trashed IOTLB entry for later lookup by plugin.
Alex Bennée570ef302020-07-20 13:23:58 +01001386 * This is read by tlb_plugin_lookup if the iotlb entry doesn't match
1387 * because of the side effect of io_writex changing memory layout.
Alex Bennée2f3a57e2020-07-13 21:04:10 +01001388 */
1389static void save_iotlb_data(CPUState *cs, hwaddr addr,
1390 MemoryRegionSection *section, hwaddr mr_offset)
1391{
1392#ifdef CONFIG_PLUGIN
1393 SavedIOTLB *saved = &cs->saved_iotlb;
1394 saved->addr = addr;
1395 saved->section = section;
1396 saved->mr_offset = mr_offset;
1397#endif
1398}
1399
Richard Henderson82a45b92016-07-08 18:51:28 -07001400static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry,
Richard Hendersonf1be3692019-04-25 14:16:34 -07001401 int mmu_idx, uint64_t val, target_ulong addr,
Tony Nguyenbe5c4782019-08-24 04:36:53 +10001402 uintptr_t retaddr, MemOp op)
Richard Henderson82a45b92016-07-08 18:51:28 -07001403{
Richard Henderson29a0af62019-03-22 16:07:18 -07001404 CPUState *cpu = env_cpu(env);
Peter Maydell2d54f192018-06-15 14:57:14 +01001405 hwaddr mr_offset;
1406 MemoryRegionSection *section;
1407 MemoryRegion *mr;
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001408 bool locked = false;
Peter Maydell04e3aab2017-09-04 15:21:55 +01001409 MemTxResult r;
Richard Henderson82a45b92016-07-08 18:51:28 -07001410
Peter Maydell2d54f192018-06-15 14:57:14 +01001411 section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1412 mr = section->mr;
1413 mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
Richard Henderson08565552019-09-18 09:15:44 -07001414 if (!cpu->can_do_io) {
Richard Henderson82a45b92016-07-08 18:51:28 -07001415 cpu_io_recompile(cpu, retaddr);
1416 }
Richard Henderson82a45b92016-07-08 18:51:28 -07001417 cpu->mem_io_pc = retaddr;
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001418
Alex Bennée2f3a57e2020-07-13 21:04:10 +01001419 /*
1420 * The memory_region_dispatch may trigger a flush/resize
1421 * so for plugins we save the iotlb_data just in case.
1422 */
1423 save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset);
1424
Philippe Mathieu-Daudé41744952020-08-06 17:07:26 +02001425 if (!qemu_mutex_iothread_locked()) {
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001426 qemu_mutex_lock_iothread();
1427 locked = true;
1428 }
Tony Nguyenbe5c4782019-08-24 04:36:53 +10001429 r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs);
Peter Maydell04e3aab2017-09-04 15:21:55 +01001430 if (r != MEMTX_OK) {
Peter Maydell2d54f192018-06-15 14:57:14 +01001431 hwaddr physaddr = mr_offset +
1432 section->offset_within_address_space -
1433 section->offset_within_region;
1434
Tony Nguyenbe5c4782019-08-24 04:36:53 +10001435 cpu_transaction_failed(cpu, physaddr, addr, memop_size(op),
1436 MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r,
1437 retaddr);
Peter Maydell04e3aab2017-09-04 15:21:55 +01001438 }
Jan Kiszka8d04fb52017-02-23 18:29:11 +00001439 if (locked) {
1440 qemu_mutex_unlock_iothread();
1441 }
Richard Henderson82a45b92016-07-08 18:51:28 -07001442}
1443
Richard Henderson4811e902019-04-03 10:16:56 +07001444static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs)
1445{
1446#if TCG_OVERSIZED_GUEST
1447 return *(target_ulong *)((uintptr_t)entry + ofs);
1448#else
Stefan Hajnoczid73415a2020-09-23 11:56:46 +01001449 /* ofs might correspond to .addr_write, so use qatomic_read */
1450 return qatomic_read((target_ulong *)((uintptr_t)entry + ofs));
Richard Henderson4811e902019-04-03 10:16:56 +07001451#endif
1452}
1453
Richard Henderson7e9a7c52016-07-08 12:19:32 -07001454/* Return true if ADDR is present in the victim tlb, and has been copied
1455 back to the main tlb. */
1456static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index,
1457 size_t elt_ofs, target_ulong page)
1458{
1459 size_t vidx;
Emilio G. Cota71aec352018-10-09 13:45:56 -04001460
Richard Henderson29a0af62019-03-22 16:07:18 -07001461 assert_cpu_is_self(env_cpu(env));
Richard Henderson7e9a7c52016-07-08 12:19:32 -07001462 for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) {
Richard Hendersona40ec842019-03-22 13:52:09 -07001463 CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx];
1464 target_ulong cmp;
1465
Stefan Hajnoczid73415a2020-09-23 11:56:46 +01001466 /* elt_ofs might correspond to .addr_write, so use qatomic_read */
Richard Hendersona40ec842019-03-22 13:52:09 -07001467#if TCG_OVERSIZED_GUEST
1468 cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs);
1469#else
Stefan Hajnoczid73415a2020-09-23 11:56:46 +01001470 cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs));
Richard Hendersona40ec842019-03-22 13:52:09 -07001471#endif
Richard Henderson7e9a7c52016-07-08 12:19:32 -07001472
1473 if (cmp == page) {
1474 /* Found entry in victim tlb, swap tlb and iotlb. */
Richard Hendersona40ec842019-03-22 13:52:09 -07001475 CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index];
Alex Bennéeb0706b72017-02-23 18:29:21 +00001476
Richard Hendersona40ec842019-03-22 13:52:09 -07001477 qemu_spin_lock(&env_tlb(env)->c.lock);
Emilio G. Cota71aec352018-10-09 13:45:56 -04001478 copy_tlb_helper_locked(&tmptlb, tlb);
1479 copy_tlb_helper_locked(tlb, vtlb);
1480 copy_tlb_helper_locked(vtlb, &tmptlb);
Richard Hendersona40ec842019-03-22 13:52:09 -07001481 qemu_spin_unlock(&env_tlb(env)->c.lock);
Alex Bennéeb0706b72017-02-23 18:29:21 +00001482
Richard Hendersona40ec842019-03-22 13:52:09 -07001483 CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index];
1484 CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx];
Richard Henderson7e9a7c52016-07-08 12:19:32 -07001485 tmpio = *io; *io = *vio; *vio = tmpio;
1486 return true;
1487 }
1488 }
1489 return false;
1490}
1491
1492/* Macro to call the above, with local variables from the use context. */
Samuel Damasheka3902842016-07-06 14:26:52 -04001493#define VICTIM_TLB_HIT(TY, ADDR) \
Richard Henderson7e9a7c52016-07-08 12:19:32 -07001494 victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \
Samuel Damasheka3902842016-07-06 14:26:52 -04001495 (ADDR) & TARGET_PAGE_MASK)
Richard Henderson7e9a7c52016-07-08 12:19:32 -07001496
Richard Henderson30d7e092019-08-23 15:12:32 -07001497/*
1498 * Return a ram_addr_t for the virtual address for execution.
1499 *
1500 * Return -1 if we can't translate and execute from an entire page
1501 * of RAM. This will force us to execute by loading and translating
1502 * one insn at a time, without caching.
1503 *
1504 * NOTE: This function will trigger an exception if the page is
1505 * not executable.
KONRAD Fredericf2553f02017-02-03 16:29:50 +01001506 */
Emilio G. Cota4b2190d2018-11-03 17:40:22 -04001507tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr,
1508 void **hostp)
KONRAD Fredericf2553f02017-02-03 16:29:50 +01001509{
Richard Henderson383beda2018-10-09 13:51:25 -04001510 uintptr_t mmu_idx = cpu_mmu_index(env, true);
1511 uintptr_t index = tlb_index(env, mmu_idx, addr);
1512 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
KONRAD Fredericf2553f02017-02-03 16:29:50 +01001513 void *p;
KONRAD Fredericf2553f02017-02-03 16:29:50 +01001514
Richard Henderson383beda2018-10-09 13:51:25 -04001515 if (unlikely(!tlb_hit(entry->addr_code, addr))) {
Peter Maydellb493ccf2018-07-13 15:16:35 +01001516 if (!VICTIM_TLB_HIT(addr_code, addr)) {
Richard Henderson29a0af62019-03-22 16:07:18 -07001517 tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0);
Emilio G. Cota6d967cb2019-02-09 11:27:45 -05001518 index = tlb_index(env, mmu_idx, addr);
1519 entry = tlb_entry(env, mmu_idx, addr);
Richard Henderson30d7e092019-08-23 15:12:32 -07001520
1521 if (unlikely(entry->addr_code & TLB_INVALID_MASK)) {
1522 /*
1523 * The MMU protection covers a smaller range than a target
1524 * page, so we must redo the MMU check for every insn.
1525 */
1526 return -1;
1527 }
KONRAD Frederic71b9a452017-02-03 16:32:12 +01001528 }
Richard Henderson383beda2018-10-09 13:51:25 -04001529 assert(tlb_hit(entry->addr_code, addr));
KONRAD Fredericf2553f02017-02-03 16:29:50 +01001530 }
Peter Maydell55df6fc2018-06-26 17:50:41 +01001531
Richard Henderson30d7e092019-08-23 15:12:32 -07001532 if (unlikely(entry->addr_code & TLB_MMIO)) {
1533 /* The region is not backed by RAM. */
Emilio G. Cota4b2190d2018-11-03 17:40:22 -04001534 if (hostp) {
1535 *hostp = NULL;
1536 }
Peter Maydell20cb6ae2018-08-14 17:17:19 +01001537 return -1;
Peter Maydell55df6fc2018-06-26 17:50:41 +01001538 }
1539
Richard Henderson383beda2018-10-09 13:51:25 -04001540 p = (void *)((uintptr_t)addr + entry->addend);
Emilio G. Cota4b2190d2018-11-03 17:40:22 -04001541 if (hostp) {
1542 *hostp = p;
1543 }
KONRAD Fredericf2553f02017-02-03 16:29:50 +01001544 return qemu_ram_addr_from_host_nofail(p);
1545}
1546
Emilio G. Cota4b2190d2018-11-03 17:40:22 -04001547tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr)
1548{
1549 return get_page_addr_code_hostp(env, addr, NULL);
1550}
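
/*
 * Hedged usage sketch (not part of this file): how a caller might use
 * get_page_addr_code_hostp() to read guest code bytes directly from host
 * memory when the page is RAM-backed, falling back to the cpu_ldub_code()
 * slow path when it is not (MMIO, or protection covering less than a
 * page).  The function name is hypothetical.
 */
static uint8_t example_fetch_code_byte(CPUArchState *env, target_ulong pc)
{
    void *host;

    if (get_page_addr_code_hostp(env, pc, &host) == -1) {
        /* Not backed by a full page of RAM: translate one byte at a time. */
        return cpu_ldub_code(env, pc);
    }
    /* host points at the byte for pc itself, not at the page base. */
    return ldub_p(host);
}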
1551
Richard Henderson707526a2019-09-21 18:47:59 -07001552static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size,
1553 CPUIOTLBEntry *iotlbentry, uintptr_t retaddr)
1554{
1555 ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr;
1556
1557 trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size);
1558
1559 if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) {
1560 struct page_collection *pages
1561 = page_collection_lock(ram_addr, ram_addr + size);
Richard Henderson5a7c27b2019-09-21 20:16:09 -07001562 tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr);
Richard Henderson707526a2019-09-21 18:47:59 -07001563 page_collection_unlock(pages);
1564 }
1565
1566 /*
1567 * Set both VGA and migration bits for simplicity and to remove
1568 * the notdirty callback faster.
1569 */
1570 cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE);
1571
1572 /* We remove the notdirty callback only if the code has been flushed. */
1573 if (!cpu_physical_memory_is_clean(ram_addr)) {
1574 trace_memory_notdirty_set_dirty(mem_vaddr);
1575 tlb_set_dirty(cpu, mem_vaddr);
1576 }
1577}
1578
Richard Henderson069cfe72020-05-08 08:43:45 -07001579static int probe_access_internal(CPUArchState *env, target_ulong addr,
1580 int fault_size, MMUAccessType access_type,
1581 int mmu_idx, bool nonfault,
1582 void **phost, uintptr_t retaddr)
Richard Henderson3b08f0a2016-07-08 18:22:26 -07001583{
Richard Henderson383beda2018-10-09 13:51:25 -04001584 uintptr_t index = tlb_index(env, mmu_idx, addr);
1585 CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr);
Richard Henderson069cfe72020-05-08 08:43:45 -07001586 target_ulong tlb_addr, page_addr;
David Hildenbrandc25c2832019-08-30 12:09:59 +02001587 size_t elt_ofs;
Richard Henderson069cfe72020-05-08 08:43:45 -07001588 int flags;
Richard Henderson4811e902019-04-03 10:16:56 +07001589
1590 switch (access_type) {
1591 case MMU_DATA_LOAD:
1592 elt_ofs = offsetof(CPUTLBEntry, addr_read);
1593 break;
1594 case MMU_DATA_STORE:
1595 elt_ofs = offsetof(CPUTLBEntry, addr_write);
1596 break;
1597 case MMU_INST_FETCH:
1598 elt_ofs = offsetof(CPUTLBEntry, addr_code);
1599 break;
1600 default:
1601 g_assert_not_reached();
1602 }
Richard Henderson4811e902019-04-03 10:16:56 +07001603 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1604
Richard Henderson069cfe72020-05-08 08:43:45 -07001605 page_addr = addr & TARGET_PAGE_MASK;
1606 if (!tlb_hit_page(tlb_addr, page_addr)) {
1607 if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) {
Richard Henderson29a0af62019-03-22 16:07:18 -07001608 CPUState *cs = env_cpu(env);
Richard Henderson4811e902019-04-03 10:16:56 +07001609 CPUClass *cc = CPU_GET_CLASS(cs);
1610
Claudio Fontana78271682021-02-04 17:39:23 +01001611 if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type,
1612 mmu_idx, nonfault, retaddr)) {
Richard Henderson4811e902019-04-03 10:16:56 +07001613 /* Non-faulting page table read failed. */
Richard Henderson069cfe72020-05-08 08:43:45 -07001614 *phost = NULL;
1615 return TLB_INVALID_MASK;
Richard Henderson4811e902019-04-03 10:16:56 +07001616 }
1617
1618 /* TLB resize via tlb_fill may have moved the entry. */
1619 entry = tlb_entry(env, mmu_idx, addr);
1620 }
1621 tlb_addr = tlb_read_ofs(entry, elt_ofs);
1622 }
Richard Henderson069cfe72020-05-08 08:43:45 -07001623 flags = tlb_addr & TLB_FLAGS_MASK;
Richard Henderson4811e902019-04-03 10:16:56 +07001624
Richard Henderson069cfe72020-05-08 08:43:45 -07001625 /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */
1626 if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) {
1627 *phost = NULL;
1628 return TLB_MMIO;
1629 }
1630
1631 /* Everything else is RAM. */
1632 *phost = (void *)((uintptr_t)addr + entry->addend);
1633 return flags;
1634}
1635
1636int probe_access_flags(CPUArchState *env, target_ulong addr,
1637 MMUAccessType access_type, int mmu_idx,
1638 bool nonfault, void **phost, uintptr_t retaddr)
1639{
1640 int flags;
1641
1642 flags = probe_access_internal(env, addr, 0, access_type, mmu_idx,
1643 nonfault, phost, retaddr);
1644
1645 /* Handle clean RAM pages. */
1646 if (unlikely(flags & TLB_NOTDIRTY)) {
1647 uintptr_t index = tlb_index(env, mmu_idx, addr);
1648 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1649
1650 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1651 flags &= ~TLB_NOTDIRTY;
1652 }
1653
1654 return flags;
1655}
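
/*
 * Hedged usage sketch (not part of this file): a non-faulting probe of
 * the kind used for first-fault/no-fault vector loads.  With
 * nonfault=true the target's tlb_fill hook must not raise an exception;
 * TLB_INVALID_MASK in the returned flags means the translation failed.
 * The function name is hypothetical.
 */
static bool example_probe_nofault(CPUArchState *env, target_ulong addr,
                                  int mmu_idx, uintptr_t ra, void **host)
{
    int flags = probe_access_flags(env, addr, MMU_DATA_LOAD, mmu_idx,
                                   true, host, ra);
    return !(flags & TLB_INVALID_MASK);
}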
1656
1657void *probe_access(CPUArchState *env, target_ulong addr, int size,
1658 MMUAccessType access_type, int mmu_idx, uintptr_t retaddr)
1659{
1660 void *host;
1661 int flags;
1662
1663 g_assert(-(addr | TARGET_PAGE_MASK) >= size);
1664
1665 flags = probe_access_internal(env, addr, size, access_type, mmu_idx,
1666 false, &host, retaddr);
1667
1668 /* Per the interface, size == 0 merely faults the access. */
1669 if (size == 0) {
Richard Henderson4811e902019-04-03 10:16:56 +07001670 return NULL;
1671 }
1672
Richard Henderson069cfe72020-05-08 08:43:45 -07001673 if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) {
1674 uintptr_t index = tlb_index(env, mmu_idx, addr);
1675 CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1676
1677 /* Handle watchpoints. */
1678 if (flags & TLB_WATCHPOINT) {
1679 int wp_access = (access_type == MMU_DATA_STORE
1680 ? BP_MEM_WRITE : BP_MEM_READ);
1681 cpu_check_watchpoint(env_cpu(env), addr, size,
1682 iotlbentry->attrs, wp_access, retaddr);
1683 }
1684
1685 /* Handle clean RAM pages. */
1686 if (flags & TLB_NOTDIRTY) {
1687 notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr);
1688 }
1689 }
1690
1691 return host;
Richard Henderson4811e902019-04-03 10:16:56 +07001692}
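
/*
 * Hedged usage sketch (not part of this file): a target helper that wants
 * the whole buffer validated before it starts writing can probe it up
 * front.  Assumes addr is 64-byte aligned so the block cannot cross a
 * page; the helper name is hypothetical, the called functions are the
 * real APIs.
 */
static void example_fill_block(CPUArchState *env, target_ulong addr,
                               uint8_t byte, uintptr_t ra)
{
    int mmu_idx = cpu_mmu_index(env, false);
    void *host = probe_access(env, addr, 64, MMU_DATA_STORE, mmu_idx, ra);

    if (host) {
        /* RAM fast path: the entire block is directly addressable. */
        memset(host, byte, 64);
    } else {
        /* MMIO or watchpointed memory: fall back to byte stores. */
        for (int i = 0; i < 64; i++) {
            cpu_stb_mmuidx_ra(env, addr + i, byte, mmu_idx, ra);
        }
    }
}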
1693
Richard Henderson069cfe72020-05-08 08:43:45 -07001694void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr,
1695 MMUAccessType access_type, int mmu_idx)
1696{
1697 void *host;
1698 int flags;
1699
1700 flags = probe_access_internal(env, addr, 0, access_type,
1701 mmu_idx, true, &host, 0);
1702
1703 /* No combination of flags are expected by the caller. */
1704 return flags ? NULL : host;
1705}
Alex Bennée235537f2019-06-19 20:20:08 +01001706
1707#ifdef CONFIG_PLUGIN
1708/*
1709 * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure.
1710 * This should be a hot path as we will have just looked this path up
1711 * in the softmmu lookup code (or helper). We don't handle re-fills or
 1712 * check the victim table. This is purely informational.
1713 *
Alex Bennée2f3a57e2020-07-13 21:04:10 +01001714 * This almost never fails as the memory access being instrumented
1715 * should have just filled the TLB. The one corner case is io_writex
1716 * which can cause TLB flushes and potential resizing of the TLBs
Alex Bennée570ef302020-07-20 13:23:58 +01001717 * losing the information we need. In those cases we need to recover
1718 * data from a copy of the iotlbentry. As long as this always occurs
1719 * from the same thread (which a mem callback will be) this is safe.
Alex Bennée235537f2019-06-19 20:20:08 +01001720 */
1721
1722bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx,
1723 bool is_store, struct qemu_plugin_hwaddr *data)
1724{
1725 CPUArchState *env = cpu->env_ptr;
1726 CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr);
1727 uintptr_t index = tlb_index(env, mmu_idx, addr);
1728 target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read;
1729
1730 if (likely(tlb_hit(tlb_addr, addr))) {
1731 /* We must have an iotlb entry for MMIO */
1732 if (tlb_addr & TLB_MMIO) {
1733 CPUIOTLBEntry *iotlbentry;
1734 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1735 data->is_io = true;
1736 data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs);
1737 data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr;
1738 } else {
1739 data->is_io = false;
Alex Bennée2d932032021-07-09 15:29:52 +01001740 data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
Alex Bennée235537f2019-06-19 20:20:08 +01001741 }
1742 return true;
Alex Bennée2f3a57e2020-07-13 21:04:10 +01001743 } else {
1744 SavedIOTLB *saved = &cpu->saved_iotlb;
1745 data->is_io = true;
1746 data->v.io.section = saved->section;
1747 data->v.io.offset = saved->mr_offset;
1748 return true;
Alex Bennée235537f2019-06-19 20:20:08 +01001749 }
Alex Bennée235537f2019-06-19 20:20:08 +01001750}
1751
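/*
 * Hedged consumer-side sketch: this is what a TCG plugin's memory callback
 * can do with the translation that tlb_plugin_lookup() provides.  The
 * callback would live in a plugin built against qemu-plugin.h, not in this
 * file; it is shown here only to illustrate the data flow, and the
 * callback body is a placeholder.
 */
static void example_plugin_mem_cb(unsigned int cpu_index,
                                  qemu_plugin_meminfo_t info,
                                  uint64_t vaddr, void *udata)
{
    struct qemu_plugin_hwaddr *haddr = qemu_plugin_get_hwaddr(info, vaddr);

    if (haddr && qemu_plugin_hwaddr_is_io(haddr)) {
        /* MMIO access: the section/offset came from the (saved) iotlb. */
    }
}
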
1752#endif
1753
Richard Henderson08dff432021-06-12 17:21:06 -07001754/*
1755 * Probe for an atomic operation. Do not allow unaligned operations,
1756 * or io operations to proceed. Return the host address.
1757 *
1758 * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE.
1759 */
Richard Hendersonc482cb12016-06-28 11:37:27 -07001760static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10001761 MemOpIdx oi, int size, int prot,
Richard Henderson08dff432021-06-12 17:21:06 -07001762 uintptr_t retaddr)
Richard Hendersonc482cb12016-06-28 11:37:27 -07001763{
Richard Hendersonb8260442022-04-01 11:08:13 -06001764 uintptr_t mmu_idx = get_mmuidx(oi);
Tony Nguyen14776ab2019-08-24 04:10:58 +10001765 MemOp mop = get_memop(oi);
Richard Hendersonc482cb12016-06-28 11:37:27 -07001766 int a_bits = get_alignment_bits(mop);
Richard Henderson08dff432021-06-12 17:21:06 -07001767 uintptr_t index;
1768 CPUTLBEntry *tlbe;
1769 target_ulong tlb_addr;
Peter Maydell34d49932017-11-20 18:08:28 +00001770 void *hostaddr;
Richard Hendersonc482cb12016-06-28 11:37:27 -07001771
Richard Hendersonb8260442022-04-01 11:08:13 -06001772 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
1773
Richard Hendersonc482cb12016-06-28 11:37:27 -07001774 /* Adjust the given return address. */
1775 retaddr -= GETPC_ADJ;
1776
1777 /* Enforce guest required alignment. */
1778 if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) {
1779 /* ??? Maybe indicate atomic op to cpu_unaligned_access */
Richard Henderson29a0af62019-03-22 16:07:18 -07001780 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
Richard Hendersonc482cb12016-06-28 11:37:27 -07001781 mmu_idx, retaddr);
1782 }
1783
1784 /* Enforce qemu required alignment. */
Richard Henderson08dff432021-06-12 17:21:06 -07001785 if (unlikely(addr & (size - 1))) {
Richard Hendersonc482cb12016-06-28 11:37:27 -07001786 /* We get here if guest alignment was not requested,
1787 or was not enforced by cpu_unaligned_access above.
1788 We might widen the access and emulate, but for now
1789 mark an exception and exit the cpu loop. */
1790 goto stop_the_world;
1791 }
1792
Richard Henderson08dff432021-06-12 17:21:06 -07001793 index = tlb_index(env, mmu_idx, addr);
1794 tlbe = tlb_entry(env, mmu_idx, addr);
1795
Richard Hendersonc482cb12016-06-28 11:37:27 -07001796 /* Check TLB entry and enforce page permissions. */
Richard Henderson08dff432021-06-12 17:21:06 -07001797 if (prot & PAGE_WRITE) {
1798 tlb_addr = tlb_addr_write(tlbe);
1799 if (!tlb_hit(tlb_addr, addr)) {
1800 if (!VICTIM_TLB_HIT(addr_write, addr)) {
1801 tlb_fill(env_cpu(env), addr, size,
1802 MMU_DATA_STORE, mmu_idx, retaddr);
1803 index = tlb_index(env, mmu_idx, addr);
1804 tlbe = tlb_entry(env, mmu_idx, addr);
1805 }
1806 tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK;
Richard Hendersonc482cb12016-06-28 11:37:27 -07001807 }
Richard Henderson08dff432021-06-12 17:21:06 -07001808
1809 /* Let the guest notice RMW on a write-only page. */
1810 if ((prot & PAGE_READ) &&
1811 unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) {
1812 tlb_fill(env_cpu(env), addr, size,
1813 MMU_DATA_LOAD, mmu_idx, retaddr);
1814 /*
1815 * Since we don't support reads and writes to different addresses,
1816 * and we do have the proper page loaded for write, this shouldn't
1817 * ever return. But just in case, handle via stop-the-world.
1818 */
1819 goto stop_the_world;
1820 }
1821 } else /* if (prot & PAGE_READ) */ {
1822 tlb_addr = tlbe->addr_read;
1823 if (!tlb_hit(tlb_addr, addr)) {
 1824 if (!VICTIM_TLB_HIT(addr_read, addr)) {
1825 tlb_fill(env_cpu(env), addr, size,
1826 MMU_DATA_LOAD, mmu_idx, retaddr);
1827 index = tlb_index(env, mmu_idx, addr);
1828 tlbe = tlb_entry(env, mmu_idx, addr);
1829 }
1830 tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK;
1831 }
Richard Hendersonc482cb12016-06-28 11:37:27 -07001832 }
1833
Peter Maydell55df6fc2018-06-26 17:50:41 +01001834 /* Notice an IO access or a needs-MMU-lookup access */
Richard Henderson30d7e092019-08-23 15:12:32 -07001835 if (unlikely(tlb_addr & TLB_MMIO)) {
Richard Hendersonc482cb12016-06-28 11:37:27 -07001836 /* There's really nothing that can be done to
1837 support this apart from stop-the-world. */
1838 goto stop_the_world;
1839 }
1840
Peter Maydell34d49932017-11-20 18:08:28 +00001841 hostaddr = (void *)((uintptr_t)addr + tlbe->addend);
1842
Peter Maydell34d49932017-11-20 18:08:28 +00001843 if (unlikely(tlb_addr & TLB_NOTDIRTY)) {
Richard Henderson08dff432021-06-12 17:21:06 -07001844 notdirty_write(env_cpu(env), addr, size,
Richard Henderson707526a2019-09-21 18:47:59 -07001845 &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr);
Peter Maydell34d49932017-11-20 18:08:28 +00001846 }
1847
1848 return hostaddr;
Richard Hendersonc482cb12016-06-28 11:37:27 -07001849
1850 stop_the_world:
Richard Henderson29a0af62019-03-22 16:07:18 -07001851 cpu_loop_exit_atomic(env_cpu(env), retaddr);
Richard Hendersonc482cb12016-06-28 11:37:27 -07001852}
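
/*
 * Hedged sketch of the consumer side: the generated atomic helpers
 * (atomic_template.h) resolve the host address with the lookup above,
 * perform the operation with a host atomic, and then report the access to
 * plugins.  This is a simplified stand-in, not the real template: it
 * assumes a 4-byte, host-endian access and omits the 16-byte and
 * byte-swapped cases.
 */
static uint32_t example_atomic_fetch_add32(CPUArchState *env,
                                           target_ulong addr, uint32_t val,
                                           MemOpIdx oi, uintptr_t ra)
{
    uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
                                        PAGE_READ | PAGE_WRITE, ra);
    uint32_t ret = qatomic_fetch_add(haddr, val);

    qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_RW);
    return ret;
}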
1853
Alex Bennéeeed56642019-02-15 14:31:13 +00001854/*
Richard Hendersonf83bcec2021-07-27 07:48:55 -10001855 * Verify that we have passed the correct MemOp to the correct function.
1856 *
1857 * In the case of the helper_*_mmu functions, we will have done this by
1858 * using the MemOp to look up the helper during code generation.
1859 *
1860 * In the case of the cpu_*_mmu functions, this is up to the caller.
1861 * We could present one function to target code, and dispatch based on
1862 * the MemOp, but so far we have worked hard to avoid an indirect function
1863 * call along the memory path.
1864 */
1865static void validate_memop(MemOpIdx oi, MemOp expected)
1866{
1867#ifdef CONFIG_DEBUG_TCG
1868 MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP);
1869 assert(have == expected);
1870#endif
1871}
1872
1873/*
Alex Bennéeeed56642019-02-15 14:31:13 +00001874 * Load Helpers
1875 *
1876 * We support two different access types. SOFTMMU_CODE_ACCESS is
1877 * specifically for reading instructions from system memory. It is
1878 * called by the translation loop and in some helpers where the code
1879 * is disassembled. It shouldn't be called directly by guest code.
1880 */
Paolo Bonzini0f590e742014-03-28 17:55:24 +01001881
Richard Henderson2dd92602019-04-25 20:48:57 -07001882typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10001883 MemOpIdx oi, uintptr_t retaddr);
Richard Henderson2dd92602019-04-25 20:48:57 -07001884
Richard Hendersonc6b716c2019-09-10 12:02:36 -04001885static inline uint64_t QEMU_ALWAYS_INLINE
Richard Henderson80d9d1c2019-09-10 14:56:12 -04001886load_memop(const void *haddr, MemOp op)
1887{
1888 switch (op) {
1889 case MO_UB:
1890 return ldub_p(haddr);
1891 case MO_BEUW:
1892 return lduw_be_p(haddr);
1893 case MO_LEUW:
1894 return lduw_le_p(haddr);
1895 case MO_BEUL:
1896 return (uint32_t)ldl_be_p(haddr);
1897 case MO_LEUL:
1898 return (uint32_t)ldl_le_p(haddr);
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01001899 case MO_BEUQ:
Richard Henderson80d9d1c2019-09-10 14:56:12 -04001900 return ldq_be_p(haddr);
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01001901 case MO_LEUQ:
Richard Henderson80d9d1c2019-09-10 14:56:12 -04001902 return ldq_le_p(haddr);
1903 default:
1904 qemu_build_not_reached();
1905 }
1906}
1907
1908static inline uint64_t QEMU_ALWAYS_INLINE
Richard Henderson9002ffc2021-07-25 12:06:49 -10001909load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi,
Tony Nguyenbe5c4782019-08-24 04:36:53 +10001910 uintptr_t retaddr, MemOp op, bool code_read,
Richard Henderson2dd92602019-04-25 20:48:57 -07001911 FullLoadHelper *full_load)
Alex Bennéeeed56642019-02-15 14:31:13 +00001912{
Alex Bennéeeed56642019-02-15 14:31:13 +00001913 const size_t tlb_off = code_read ?
1914 offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read);
Richard Hendersonf1be3692019-04-25 14:16:34 -07001915 const MMUAccessType access_type =
1916 code_read ? MMU_INST_FETCH : MMU_DATA_LOAD;
Richard Hendersonb8260442022-04-01 11:08:13 -06001917 const unsigned a_bits = get_alignment_bits(get_memop(oi));
1918 const size_t size = memop_size(op);
1919 uintptr_t mmu_idx = get_mmuidx(oi);
1920 uintptr_t index;
1921 CPUTLBEntry *entry;
1922 target_ulong tlb_addr;
Alex Bennéeeed56642019-02-15 14:31:13 +00001923 void *haddr;
1924 uint64_t res;
Richard Hendersonb8260442022-04-01 11:08:13 -06001925
1926 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
Paolo Bonzini0f590e742014-03-28 17:55:24 +01001927
Alex Bennéeeed56642019-02-15 14:31:13 +00001928 /* Handle CPU specific unaligned behaviour */
1929 if (addr & ((1 << a_bits) - 1)) {
Richard Henderson29a0af62019-03-22 16:07:18 -07001930 cpu_unaligned_access(env_cpu(env), addr, access_type,
Alex Bennéeeed56642019-02-15 14:31:13 +00001931 mmu_idx, retaddr);
1932 }
1933
Richard Hendersonb8260442022-04-01 11:08:13 -06001934 index = tlb_index(env, mmu_idx, addr);
1935 entry = tlb_entry(env, mmu_idx, addr);
1936 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
1937
Alex Bennéeeed56642019-02-15 14:31:13 +00001938 /* If the TLB entry is for a different page, reload and try again. */
1939 if (!tlb_hit(tlb_addr, addr)) {
1940 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
1941 addr & TARGET_PAGE_MASK)) {
Richard Henderson29a0af62019-03-22 16:07:18 -07001942 tlb_fill(env_cpu(env), addr, size,
Richard Hendersonf1be3692019-04-25 14:16:34 -07001943 access_type, mmu_idx, retaddr);
Alex Bennéeeed56642019-02-15 14:31:13 +00001944 index = tlb_index(env, mmu_idx, addr);
1945 entry = tlb_entry(env, mmu_idx, addr);
1946 }
1947 tlb_addr = code_read ? entry->addr_code : entry->addr_read;
Richard Henderson30d7e092019-08-23 15:12:32 -07001948 tlb_addr &= ~TLB_INVALID_MASK;
Alex Bennéeeed56642019-02-15 14:31:13 +00001949 }
1950
Richard Henderson50b107c2019-08-24 09:51:09 -07001951 /* Handle anything that isn't just a straight memory access. */
Alex Bennéeeed56642019-02-15 14:31:13 +00001952 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
Richard Henderson50b107c2019-08-24 09:51:09 -07001953 CPUIOTLBEntry *iotlbentry;
Richard Henderson5b87b3e2019-09-10 15:47:39 -04001954 bool need_swap;
Richard Henderson50b107c2019-08-24 09:51:09 -07001955
1956 /* For anything that is unaligned, recurse through full_load. */
Alex Bennéeeed56642019-02-15 14:31:13 +00001957 if ((addr & (size - 1)) != 0) {
1958 goto do_unaligned_access;
1959 }
Richard Henderson50b107c2019-08-24 09:51:09 -07001960
1961 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
1962
1963 /* Handle watchpoints. */
1964 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
1965 /* On watchpoint hit, this will longjmp out. */
1966 cpu_check_watchpoint(env_cpu(env), addr, size,
1967 iotlbentry->attrs, BP_MEM_READ, retaddr);
Richard Henderson50b107c2019-08-24 09:51:09 -07001968 }
1969
Richard Henderson5b87b3e2019-09-10 15:47:39 -04001970 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
1971
Richard Henderson50b107c2019-08-24 09:51:09 -07001972 /* Handle I/O access. */
Richard Henderson5b87b3e2019-09-10 15:47:39 -04001973 if (likely(tlb_addr & TLB_MMIO)) {
1974 return io_readx(env, iotlbentry, mmu_idx, addr, retaddr,
1975 access_type, op ^ (need_swap * MO_BSWAP));
1976 }
1977
1978 haddr = (void *)((uintptr_t)addr + entry->addend);
1979
1980 /*
1981 * Keep these two load_memop separate to ensure that the compiler
1982 * is able to fold the entire function to a single instruction.
1983 * There is a build-time assert inside to remind you of this. ;-)
1984 */
1985 if (unlikely(need_swap)) {
1986 return load_memop(haddr, op ^ MO_BSWAP);
1987 }
1988 return load_memop(haddr, op);
Alex Bennéeeed56642019-02-15 14:31:13 +00001989 }
1990
1991 /* Handle slow unaligned access (it spans two pages or IO). */
1992 if (size > 1
1993 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
1994 >= TARGET_PAGE_SIZE)) {
1995 target_ulong addr1, addr2;
Alex Bennée8c79b282019-06-03 15:56:32 +01001996 uint64_t r1, r2;
Alex Bennéeeed56642019-02-15 14:31:13 +00001997 unsigned shift;
1998 do_unaligned_access:
Alex Bennéeab7a2002019-06-06 16:38:19 +01001999 addr1 = addr & ~((target_ulong)size - 1);
Alex Bennéeeed56642019-02-15 14:31:13 +00002000 addr2 = addr1 + size;
Richard Henderson2dd92602019-04-25 20:48:57 -07002001 r1 = full_load(env, addr1, oi, retaddr);
2002 r2 = full_load(env, addr2, oi, retaddr);
Alex Bennéeeed56642019-02-15 14:31:13 +00002003 shift = (addr & (size - 1)) * 8;
2004
Tony Nguyenbe5c4782019-08-24 04:36:53 +10002005 if (memop_big_endian(op)) {
Alex Bennéeeed56642019-02-15 14:31:13 +00002006 /* Big-endian combine. */
2007 res = (r1 << shift) | (r2 >> ((size * 8) - shift));
2008 } else {
2009 /* Little-endian combine. */
2010 res = (r1 >> shift) | (r2 << ((size * 8) - shift));
2011 }
2012 return res & MAKE_64BIT_MASK(0, size * 8);
2013 }
2014
2015 haddr = (void *)((uintptr_t)addr + entry->addend);
Richard Henderson80d9d1c2019-09-10 14:56:12 -04002016 return load_memop(haddr, op);
Alex Bennéeeed56642019-02-15 14:31:13 +00002017}
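
/*
 * Illustrative sketch (plain C, not used by QEMU): the little-endian
 * combine rule from the unaligned path above, specialised to a 4-byte
 * load that straddles a page boundary.  r1 and r2 stand for the two
 * recursive loads at addr1 and addr1 + 4, and misalign is addr & 3
 * (1..3 on this path).
 */
static inline uint32_t example_le_combine32(uint64_t r1, uint64_t r2,
                                            unsigned misalign)
{
    unsigned shift = misalign * 8;              /* bits consumed from r1 */

    /* Equivalent to res & MAKE_64BIT_MASK(0, 32) in load_helper(). */
    return (uint32_t)((r1 >> shift) | (r2 << (32 - shift)));
}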
2018
2019/*
2020 * For the benefit of TCG generated code, we want to avoid the
2021 * complication of ABI-specific return type promotion and always
2022 * return a value extended to the register size of the host. This is
2023 * tcg_target_long, except in the case of a 32-bit host and 64-bit
2024 * data, and for that we always have uint64_t.
2025 *
2026 * We don't bother with this widened value for SOFTMMU_CODE_ACCESS.
2027 */
2028
Richard Henderson2dd92602019-04-25 20:48:57 -07002029static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002030 MemOpIdx oi, uintptr_t retaddr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002031{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002032 validate_memop(oi, MO_UB);
Tony Nguyenbe5c4782019-08-24 04:36:53 +10002033 return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu);
Richard Henderson2dd92602019-04-25 20:48:57 -07002034}
2035
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002036tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002037 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002038{
Richard Henderson2dd92602019-04-25 20:48:57 -07002039 return full_ldub_mmu(env, addr, oi, retaddr);
2040}
2041
2042static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002043 MemOpIdx oi, uintptr_t retaddr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002044{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002045 validate_memop(oi, MO_LEUW);
Tony Nguyenbe5c4782019-08-24 04:36:53 +10002046 return load_helper(env, addr, oi, retaddr, MO_LEUW, false,
Richard Henderson2dd92602019-04-25 20:48:57 -07002047 full_le_lduw_mmu);
Alex Bennéeeed56642019-02-15 14:31:13 +00002048}
2049
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002050tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002051 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002052{
Richard Henderson2dd92602019-04-25 20:48:57 -07002053 return full_le_lduw_mmu(env, addr, oi, retaddr);
2054}
2055
2056static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002057 MemOpIdx oi, uintptr_t retaddr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002058{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002059 validate_memop(oi, MO_BEUW);
Tony Nguyenbe5c4782019-08-24 04:36:53 +10002060 return load_helper(env, addr, oi, retaddr, MO_BEUW, false,
Richard Henderson2dd92602019-04-25 20:48:57 -07002061 full_be_lduw_mmu);
Alex Bennéeeed56642019-02-15 14:31:13 +00002062}
2063
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002064tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002065 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002066{
Richard Henderson2dd92602019-04-25 20:48:57 -07002067 return full_be_lduw_mmu(env, addr, oi, retaddr);
2068}
2069
2070static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002071 MemOpIdx oi, uintptr_t retaddr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002072{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002073 validate_memop(oi, MO_LEUL);
Tony Nguyenbe5c4782019-08-24 04:36:53 +10002074 return load_helper(env, addr, oi, retaddr, MO_LEUL, false,
Richard Henderson2dd92602019-04-25 20:48:57 -07002075 full_le_ldul_mmu);
Alex Bennéeeed56642019-02-15 14:31:13 +00002076}
2077
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002078tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002079 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002080{
Richard Henderson2dd92602019-04-25 20:48:57 -07002081 return full_le_ldul_mmu(env, addr, oi, retaddr);
2082}
2083
2084static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002085 MemOpIdx oi, uintptr_t retaddr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002086{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002087 validate_memop(oi, MO_BEUL);
Tony Nguyenbe5c4782019-08-24 04:36:53 +10002088 return load_helper(env, addr, oi, retaddr, MO_BEUL, false,
Richard Henderson2dd92602019-04-25 20:48:57 -07002089 full_be_ldul_mmu);
Alex Bennéeeed56642019-02-15 14:31:13 +00002090}
2091
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002092tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002093 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002094{
Richard Henderson2dd92602019-04-25 20:48:57 -07002095 return full_be_ldul_mmu(env, addr, oi, retaddr);
Alex Bennéeeed56642019-02-15 14:31:13 +00002096}
2097
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002098uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002099 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002100{
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002101 validate_memop(oi, MO_LEUQ);
2102 return load_helper(env, addr, oi, retaddr, MO_LEUQ, false,
Richard Henderson2dd92602019-04-25 20:48:57 -07002103 helper_le_ldq_mmu);
Alex Bennéeeed56642019-02-15 14:31:13 +00002104}
2105
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002106uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002107 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002108{
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002109 validate_memop(oi, MO_BEUQ);
2110 return load_helper(env, addr, oi, retaddr, MO_BEUQ, false,
Richard Henderson2dd92602019-04-25 20:48:57 -07002111 helper_be_ldq_mmu);
Alex Bennéeeed56642019-02-15 14:31:13 +00002112}
2113
2114/*
2115 * Provide signed versions of the load routines as well. We can of course
 2116 * avoid this for 64-bit data, or for 32-bit data on a 32-bit host.
2117 */
2118
2119
2120tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002121 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002122{
2123 return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr);
2124}
2125
2126tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002127 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002128{
2129 return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr);
2130}
2131
2132tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002133 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002134{
2135 return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr);
2136}
2137
2138tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002139 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002140{
2141 return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr);
2142}
2143
2144tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002145 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002146{
2147 return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr);
2148}
2149
2150/*
Richard Hendersond03f1402019-12-09 13:49:58 -08002151 * Load helpers for cpu_ldst.h.
2152 */
2153
2154static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr,
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002155 MemOpIdx oi, uintptr_t retaddr,
2156 FullLoadHelper *full_load)
Richard Hendersond03f1402019-12-09 13:49:58 -08002157{
Richard Hendersond03f1402019-12-09 13:49:58 -08002158 uint64_t ret;
2159
Richard Hendersond03f1402019-12-09 13:49:58 -08002160 ret = full_load(env, addr, oi, retaddr);
Richard Henderson37aff082021-07-26 11:48:30 -10002161 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R);
Richard Hendersond03f1402019-12-09 13:49:58 -08002162 return ret;
2163}
2164
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002165uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra)
Richard Hendersond03f1402019-12-09 13:49:58 -08002166{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002167 return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002168}
2169
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002170uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr,
2171 MemOpIdx oi, uintptr_t ra)
Richard Hendersond03f1402019-12-09 13:49:58 -08002172{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002173 return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002174}
2175
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002176uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr,
2177 MemOpIdx oi, uintptr_t ra)
Richard Hendersond03f1402019-12-09 13:49:58 -08002178{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002179 return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002180}
2181
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002182uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr,
2183 MemOpIdx oi, uintptr_t ra)
Richard Hendersond03f1402019-12-09 13:49:58 -08002184{
Richard Henderson46697cb2022-03-14 17:25:06 -07002185 return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002186}
2187
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002188uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr,
2189 MemOpIdx oi, uintptr_t ra)
Richard Hendersond03f1402019-12-09 13:49:58 -08002190{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002191 return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002192}
2193
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002194uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr,
2195 MemOpIdx oi, uintptr_t ra)
Richard Hendersond03f1402019-12-09 13:49:58 -08002196{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002197 return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu);
Richard Hendersonb9e60252020-05-08 08:43:46 -07002198}
2199
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002200uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr,
2201 MemOpIdx oi, uintptr_t ra)
Richard Hendersonb9e60252020-05-08 08:43:46 -07002202{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002203 return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu);
Richard Hendersoncfe04a42019-12-11 10:33:26 -08002204}
2205
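/*
 * Hedged usage sketch (not part of this file): callers of the cpu_*_mmu
 * functions above build the MemOpIdx themselves with make_memop_idx().
 * The wrapper name here is hypothetical.
 */
static uint32_t example_ldl_le(CPUArchState *env, target_ulong addr,
                               uintptr_t ra)
{
    MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));

    return cpu_ldl_le_mmu(env, addr, oi, ra);
}
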
Richard Hendersond03f1402019-12-09 13:49:58 -08002206/*
Alex Bennéeeed56642019-02-15 14:31:13 +00002207 * Store Helpers
2208 */
2209
Richard Hendersonc6b716c2019-09-10 12:02:36 -04002210static inline void QEMU_ALWAYS_INLINE
Richard Henderson80d9d1c2019-09-10 14:56:12 -04002211store_memop(void *haddr, uint64_t val, MemOp op)
2212{
2213 switch (op) {
2214 case MO_UB:
2215 stb_p(haddr, val);
2216 break;
2217 case MO_BEUW:
2218 stw_be_p(haddr, val);
2219 break;
2220 case MO_LEUW:
2221 stw_le_p(haddr, val);
2222 break;
2223 case MO_BEUL:
2224 stl_be_p(haddr, val);
2225 break;
2226 case MO_LEUL:
2227 stl_le_p(haddr, val);
2228 break;
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002229 case MO_BEUQ:
Richard Henderson80d9d1c2019-09-10 14:56:12 -04002230 stq_be_p(haddr, val);
2231 break;
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002232 case MO_LEUQ:
Richard Henderson80d9d1c2019-09-10 14:56:12 -04002233 stq_le_p(haddr, val);
2234 break;
2235 default:
2236 qemu_build_not_reached();
2237 }
2238}
2239
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002240static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2241 MemOpIdx oi, uintptr_t retaddr);
2242
Richard Henderson6b8b6222020-07-26 15:39:53 -07002243static void __attribute__((noinline))
2244store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val,
2245 uintptr_t retaddr, size_t size, uintptr_t mmu_idx,
2246 bool big_endian)
2247{
2248 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
2249 uintptr_t index, index2;
2250 CPUTLBEntry *entry, *entry2;
2251 target_ulong page2, tlb_addr, tlb_addr2;
Richard Henderson9002ffc2021-07-25 12:06:49 -10002252 MemOpIdx oi;
Richard Henderson6b8b6222020-07-26 15:39:53 -07002253 size_t size2;
2254 int i;
2255
2256 /*
2257 * Ensure the second page is in the TLB. Note that the first page
2258 * is already guaranteed to be filled, and that the second page
2259 * cannot evict the first.
2260 */
2261 page2 = (addr + size) & TARGET_PAGE_MASK;
2262 size2 = (addr + size) & ~TARGET_PAGE_MASK;
2263 index2 = tlb_index(env, mmu_idx, page2);
2264 entry2 = tlb_entry(env, mmu_idx, page2);
2265
2266 tlb_addr2 = tlb_addr_write(entry2);
2267 if (!tlb_hit_page(tlb_addr2, page2)) {
2268 if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) {
2269 tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE,
2270 mmu_idx, retaddr);
2271 index2 = tlb_index(env, mmu_idx, page2);
2272 entry2 = tlb_entry(env, mmu_idx, page2);
2273 }
2274 tlb_addr2 = tlb_addr_write(entry2);
2275 }
2276
2277 index = tlb_index(env, mmu_idx, addr);
2278 entry = tlb_entry(env, mmu_idx, addr);
2279 tlb_addr = tlb_addr_write(entry);
2280
2281 /*
2282 * Handle watchpoints. Since this may trap, all checks
2283 * must happen before any store.
2284 */
2285 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2286 cpu_check_watchpoint(env_cpu(env), addr, size - size2,
2287 env_tlb(env)->d[mmu_idx].iotlb[index].attrs,
2288 BP_MEM_WRITE, retaddr);
2289 }
2290 if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) {
2291 cpu_check_watchpoint(env_cpu(env), page2, size2,
2292 env_tlb(env)->d[mmu_idx].iotlb[index2].attrs,
2293 BP_MEM_WRITE, retaddr);
2294 }
2295
2296 /*
2297 * XXX: not efficient, but simple.
2298 * This loop must go in the forward direction to avoid issues
2299 * with self-modifying code in Windows 64-bit.
2300 */
2301 oi = make_memop_idx(MO_UB, mmu_idx);
2302 if (big_endian) {
2303 for (i = 0; i < size; ++i) {
2304 /* Big-endian extract. */
2305 uint8_t val8 = val >> (((size - 1) * 8) - (i * 8));
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002306 full_stb_mmu(env, addr + i, val8, oi, retaddr);
Richard Henderson6b8b6222020-07-26 15:39:53 -07002307 }
2308 } else {
2309 for (i = 0; i < size; ++i) {
2310 /* Little-endian extract. */
2311 uint8_t val8 = val >> (i * 8);
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002312 full_stb_mmu(env, addr + i, val8, oi, retaddr);
Richard Henderson6b8b6222020-07-26 15:39:53 -07002313 }
2314 }
2315}
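
/*
 * Illustrative sketch (plain C, not used by QEMU): the byte-extraction
 * rule in the loops above.  For a big-endian store of 'size' bytes,
 * target byte i carries bits (size-1-i)*8 .. (size-1-i)*8+7 of val.
 */
static inline uint8_t example_be_extract_byte(uint64_t val, size_t size,
                                              size_t i)
{
    return val >> (((size - 1) * 8) - (i * 8));
}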
2316
Richard Henderson80d9d1c2019-09-10 14:56:12 -04002317static inline void QEMU_ALWAYS_INLINE
Richard Henderson4601f8d2019-04-25 21:12:59 -07002318store_helper(CPUArchState *env, target_ulong addr, uint64_t val,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002319 MemOpIdx oi, uintptr_t retaddr, MemOp op)
Alex Bennéeeed56642019-02-15 14:31:13 +00002320{
Alex Bennéeeed56642019-02-15 14:31:13 +00002321 const size_t tlb_off = offsetof(CPUTLBEntry, addr_write);
Richard Hendersonb8260442022-04-01 11:08:13 -06002322 const unsigned a_bits = get_alignment_bits(get_memop(oi));
2323 const size_t size = memop_size(op);
2324 uintptr_t mmu_idx = get_mmuidx(oi);
2325 uintptr_t index;
2326 CPUTLBEntry *entry;
2327 target_ulong tlb_addr;
Alex Bennéeeed56642019-02-15 14:31:13 +00002328 void *haddr;
Richard Hendersonb8260442022-04-01 11:08:13 -06002329
2330 tcg_debug_assert(mmu_idx < NB_MMU_MODES);
Alex Bennéeeed56642019-02-15 14:31:13 +00002331
2332 /* Handle CPU specific unaligned behaviour */
2333 if (addr & ((1 << a_bits) - 1)) {
Richard Henderson29a0af62019-03-22 16:07:18 -07002334 cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE,
Alex Bennéeeed56642019-02-15 14:31:13 +00002335 mmu_idx, retaddr);
2336 }
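    /*
     * Example: a 4-byte store with MO_ALIGN has a_bits == 2, so any
     * address with one of its two low bits set is diverted to
     * cpu_unaligned_access(), which normally raises the guest's
     * alignment exception before the TLB is even consulted.
     */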
2337
Richard Hendersonb8260442022-04-01 11:08:13 -06002338 index = tlb_index(env, mmu_idx, addr);
2339 entry = tlb_entry(env, mmu_idx, addr);
2340 tlb_addr = tlb_addr_write(entry);
2341
Alex Bennéeeed56642019-02-15 14:31:13 +00002342 /* If the TLB entry is for a different page, reload and try again. */
2343 if (!tlb_hit(tlb_addr, addr)) {
2344 if (!victim_tlb_hit(env, mmu_idx, index, tlb_off,
2345 addr & TARGET_PAGE_MASK)) {
Richard Henderson29a0af62019-03-22 16:07:18 -07002346 tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE,
Alex Bennéeeed56642019-02-15 14:31:13 +00002347 mmu_idx, retaddr);
2348 index = tlb_index(env, mmu_idx, addr);
2349 entry = tlb_entry(env, mmu_idx, addr);
2350 }
2351 tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK;
2352 }
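    /*
     * Note: the ~TLB_INVALID_MASK above only clears the bit in the local
     * tlb_addr copy.  tlb_fill() may deliberately leave the TLB entry
     * marked invalid (e.g. pages installed with PAGE_WRITE_INV), so the
     * next store to the page refaults through tlb_fill(), while the
     * access just validated is allowed to proceed.
     */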
2353
Richard Henderson50b107c2019-08-24 09:51:09 -07002354 /* Handle anything that isn't just a straight memory access. */
Alex Bennéeeed56642019-02-15 14:31:13 +00002355 if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) {
Richard Henderson50b107c2019-08-24 09:51:09 -07002356 CPUIOTLBEntry *iotlbentry;
Richard Henderson5b87b3e2019-09-10 15:47:39 -04002357 bool need_swap;
Richard Henderson50b107c2019-08-24 09:51:09 -07002358
2359 /* For anything that is unaligned, recurse through byte stores. */
Alex Bennéeeed56642019-02-15 14:31:13 +00002360 if ((addr & (size - 1)) != 0) {
2361 goto do_unaligned_access;
2362 }
Richard Henderson50b107c2019-08-24 09:51:09 -07002363
2364 iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index];
2365
2366 /* Handle watchpoints. */
2367 if (unlikely(tlb_addr & TLB_WATCHPOINT)) {
2368 /* On watchpoint hit, this will longjmp out. */
2369 cpu_check_watchpoint(env_cpu(env), addr, size,
2370 iotlbentry->attrs, BP_MEM_WRITE, retaddr);
Richard Henderson50b107c2019-08-24 09:51:09 -07002371 }
2372
Richard Henderson5b87b3e2019-09-10 15:47:39 -04002373 need_swap = size > 1 && (tlb_addr & TLB_BSWAP);
2374
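        /*
         * need_swap is 0 or 1, so "op ^ (need_swap * MO_BSWAP)" flips the
         * byte-swap bit of the MemOp only when the page is tagged
         * TLB_BSWAP, i.e. when the mapping requires data to be stored
         * with the opposite byte order from the one requested.
         */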
Richard Henderson50b107c2019-08-24 09:51:09 -07002375 /* Handle I/O access. */
Richard Henderson08565552019-09-18 09:15:44 -07002376 if (tlb_addr & TLB_MMIO) {
Richard Henderson5b87b3e2019-09-10 15:47:39 -04002377 io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr,
2378 op ^ (need_swap * MO_BSWAP));
2379 return;
2380 }
2381
Richard Henderson7b0d7922019-09-19 17:54:10 -07002382 /* Ignore writes to ROM. */
2383 if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) {
2384 return;
2385 }
2386
Richard Henderson08565552019-09-18 09:15:44 -07002387 /* Handle clean RAM pages. */
2388 if (tlb_addr & TLB_NOTDIRTY) {
Richard Henderson707526a2019-09-21 18:47:59 -07002389 notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr);
Richard Henderson08565552019-09-18 09:15:44 -07002390 }
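        /*
         * notdirty_write() updates the page's dirty bitmaps and, for
         * pages containing translated code, invalidates the affected
         * TBs; this is how self-modifying code is caught on the store
         * path.
         */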
2391
Richard Henderson707526a2019-09-21 18:47:59 -07002392 haddr = (void *)((uintptr_t)addr + entry->addend);
2393
Richard Henderson5b87b3e2019-09-10 15:47:39 -04002394 /*
2395 * Keep these two store_memop separate to ensure that the compiler
2396 * is able to fold the entire function to a single instruction.
2397 * There is a build-time assert inside to remind you of this. ;-)
2398 */
2399 if (unlikely(need_swap)) {
2400 store_memop(haddr, val, op ^ MO_BSWAP);
2401 } else {
2402 store_memop(haddr, val, op);
2403 }
Alex Bennéeeed56642019-02-15 14:31:13 +00002404 return;
2405 }
2406
2407 /* Handle slow unaligned access (it spans two pages or IO). */
2408 if (size > 1
2409 && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1
2410 >= TARGET_PAGE_SIZE)) {
Alex Bennéeeed56642019-02-15 14:31:13 +00002411 do_unaligned_access:
Richard Henderson6b8b6222020-07-26 15:39:53 -07002412 store_helper_unaligned(env, addr, val, retaddr, size,
2413 mmu_idx, memop_big_endian(op));
Alex Bennéeeed56642019-02-15 14:31:13 +00002414 return;
2415 }
2416
2417 haddr = (void *)((uintptr_t)addr + entry->addend);
Richard Henderson80d9d1c2019-09-10 14:56:12 -04002418 store_memop(haddr, val, op);
Alex Bennéeeed56642019-02-15 14:31:13 +00002419}
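/*
 * store_helper() is always inlined into the full_*_mmu() wrappers below
 * with a compile-time constant MemOp, so in each wrapper the fast path's
 * store_memop() folds down to a single host store (plus an optional byte
 * swap), as the comment inside store_helper() intends.
 */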
2420
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002421static void __attribute__((noinline))
2422full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2423 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002424{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002425 validate_memop(oi, MO_UB);
Tony Nguyenbe5c4782019-08-24 04:36:53 +10002426 store_helper(env, addr, val, oi, retaddr, MO_UB);
Alex Bennéeeed56642019-02-15 14:31:13 +00002427}
2428
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002429void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2430 MemOpIdx oi, uintptr_t retaddr)
2431{
2432 full_stb_mmu(env, addr, val, oi, retaddr);
2433}
2434
2435static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2436 MemOpIdx oi, uintptr_t retaddr)
2437{
2438 validate_memop(oi, MO_LEUW);
2439 store_helper(env, addr, val, oi, retaddr, MO_LEUW);
2440}
2441
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002442void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002443 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002444{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002445 full_le_stw_mmu(env, addr, val, oi, retaddr);
2446}
2447
2448static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2449 MemOpIdx oi, uintptr_t retaddr)
2450{
2451 validate_memop(oi, MO_BEUW);
2452 store_helper(env, addr, val, oi, retaddr, MO_BEUW);
Alex Bennéeeed56642019-02-15 14:31:13 +00002453}
2454
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002455void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002456 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002457{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002458 full_be_stw_mmu(env, addr, val, oi, retaddr);
2459}
2460
2461static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2462 MemOpIdx oi, uintptr_t retaddr)
2463{
2464 validate_memop(oi, MO_LEUL);
2465 store_helper(env, addr, val, oi, retaddr, MO_LEUL);
Alex Bennéeeed56642019-02-15 14:31:13 +00002466}
2467
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002468void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002469 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002470{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002471 full_le_stl_mmu(env, addr, val, oi, retaddr);
2472}
2473
2474static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2475 MemOpIdx oi, uintptr_t retaddr)
2476{
2477 validate_memop(oi, MO_BEUL);
2478 store_helper(env, addr, val, oi, retaddr, MO_BEUL);
Alex Bennéeeed56642019-02-15 14:31:13 +00002479}
2480
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002481void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002482 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002483{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002484 full_be_stl_mmu(env, addr, val, oi, retaddr);
Alex Bennéeeed56642019-02-15 14:31:13 +00002485}
2486
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002487void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002488 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002489{
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002490 validate_memop(oi, MO_LEUQ);
2491 store_helper(env, addr, val, oi, retaddr, MO_LEUQ);
Alex Bennéeeed56642019-02-15 14:31:13 +00002492}
2493
Richard Hendersonfc1bc772019-04-25 20:01:37 -07002494void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002495 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002496{
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002497 validate_memop(oi, MO_BEUQ);
2498 store_helper(env, addr, val, oi, retaddr, MO_BEUQ);
Alex Bennéeeed56642019-02-15 14:31:13 +00002499}
Paolo Bonzini0f590e742014-03-28 17:55:24 +01002500
Richard Hendersond03f1402019-12-09 13:49:58 -08002501/*
2502 * Store Helpers for cpu_ldst.h
2503 */
2504
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002505typedef void FullStoreHelper(CPUArchState *env, target_ulong addr,
2506 uint64_t val, MemOpIdx oi, uintptr_t retaddr);
2507
2508static inline void cpu_store_helper(CPUArchState *env, target_ulong addr,
2509 uint64_t val, MemOpIdx oi, uintptr_t ra,
2510 FullStoreHelper *full_store)
Richard Hendersond03f1402019-12-09 13:49:58 -08002511{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002512 full_store(env, addr, val, oi, ra);
Richard Henderson37aff082021-07-26 11:48:30 -10002513 qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W);
Richard Hendersond03f1402019-12-09 13:49:58 -08002514}
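/*
 * The plugin callback runs only after full_store() has returned, so
 * plugins observe stores that actually completed; a store that faults
 * and longjmps out of the helper is not reported.
 */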
2515
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002516void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val,
2517 MemOpIdx oi, uintptr_t retaddr)
Richard Hendersond03f1402019-12-09 13:49:58 -08002518{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002519 cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002520}
2521
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002522void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2523 MemOpIdx oi, uintptr_t retaddr)
Richard Hendersond03f1402019-12-09 13:49:58 -08002524{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002525 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002526}
2527
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002528void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2529 MemOpIdx oi, uintptr_t retaddr)
Richard Hendersond03f1402019-12-09 13:49:58 -08002530{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002531 cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002532}
2533
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002534void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2535 MemOpIdx oi, uintptr_t retaddr)
Richard Hendersond03f1402019-12-09 13:49:58 -08002536{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002537 cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu);
Richard Hendersonb9e60252020-05-08 08:43:46 -07002538}
2539
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002540void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val,
2541 MemOpIdx oi, uintptr_t retaddr)
Richard Hendersonb9e60252020-05-08 08:43:46 -07002542{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002543 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu);
Richard Hendersonb9e60252020-05-08 08:43:46 -07002544}
2545
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002546void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val,
2547 MemOpIdx oi, uintptr_t retaddr)
Richard Hendersonb9e60252020-05-08 08:43:46 -07002548{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002549 cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu);
Richard Hendersonb9e60252020-05-08 08:43:46 -07002550}
2551
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002552void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val,
2553 MemOpIdx oi, uintptr_t retaddr)
Richard Hendersonb9e60252020-05-08 08:43:46 -07002554{
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002555 cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu);
Richard Hendersond03f1402019-12-09 13:49:58 -08002556}
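/*
 * Illustrative use (not part of this file): a target helper that wants a
 * plugin-visible store with an explicit MMU index and proper fault
 * unwinding could do
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, mmu_idx);
 *     cpu_stl_le_mmu(env, addr, val, oi, GETPC());
 *
 * where GETPC() supplies the host return address that tlb_fill() uses to
 * restore guest state if the store faults.
 */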
2557
Richard Hendersonf83bcec2021-07-27 07:48:55 -10002558#include "ldst_common.c.inc"
Richard Hendersoncfe04a42019-12-11 10:33:26 -08002559
Richard Hendersonbe9568b2021-07-16 14:20:49 -07002560/*
2561 * First set of functions passes in OI and RETADDR.
2562 * This makes them callable from other helpers.
2563 */
Richard Hendersonc482cb12016-06-28 11:37:27 -07002564
Richard Hendersonc482cb12016-06-28 11:37:27 -07002565#define ATOMIC_NAME(X) \
Richard Hendersonbe9568b2021-07-16 14:20:49 -07002566 glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu)
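/*
 * For example, with DATA_SIZE 4 the template is expected to produce names
 * such as cpu_atomic_cmpxchgl_le_mmu and cpu_atomic_cmpxchgl_be_mmu (the
 * SUFFIX and END parts are supplied by atomic_template.h).
 */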
Richard Hendersona754f7f2021-07-16 17:49:09 -07002567
Richard Henderson707526a2019-09-21 18:47:59 -07002568#define ATOMIC_MMU_CLEANUP
Richard Hendersonc482cb12016-06-28 11:37:27 -07002569
Paolo Bonzini139c1832020-02-04 12:41:01 +01002570#include "atomic_common.c.inc"
Richard Hendersonc482cb12016-06-28 11:37:27 -07002571
2572#define DATA_SIZE 1
2573#include "atomic_template.h"
2574
2575#define DATA_SIZE 2
2576#include "atomic_template.h"
2577
2578#define DATA_SIZE 4
2579#include "atomic_template.h"
2580
Richard Hendersondf79b992016-09-02 12:23:57 -07002581#ifdef CONFIG_ATOMIC64
Richard Hendersonc482cb12016-06-28 11:37:27 -07002582#define DATA_SIZE 8
2583#include "atomic_template.h"
Richard Hendersondf79b992016-09-02 12:23:57 -07002584#endif
Richard Hendersonc482cb12016-06-28 11:37:27 -07002585
Richard Hendersone6cd4bb2018-08-15 16:31:47 -07002586#if HAVE_CMPXCHG128 || HAVE_ATOMIC128
Richard Henderson7ebee432016-06-29 21:10:59 -07002587#define DATA_SIZE 16
2588#include "atomic_template.h"
2589#endif
2590
Richard Hendersonc482cb12016-06-28 11:37:27 -07002591/* Code access functions. */
2592
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002593static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002594 MemOpIdx oi, uintptr_t retaddr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002595{
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002596 return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code);
Richard Henderson2dd92602019-04-25 20:48:57 -07002597}
2598
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002599uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002600{
Richard Henderson9002ffc2021-07-25 12:06:49 -10002601 MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true));
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002602 return full_ldub_code(env, addr, oi, 0);
Richard Henderson2dd92602019-04-25 20:48:57 -07002603}
2604
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002605static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002606 MemOpIdx oi, uintptr_t retaddr)
Alex Bennée4cef72d2019-10-21 16:09:10 +01002607{
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002608 return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code);
Alex Bennée4cef72d2019-10-21 16:09:10 +01002609}
2610
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002611uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002612{
Richard Henderson9002ffc2021-07-25 12:06:49 -10002613 MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true));
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002614 return full_lduw_code(env, addr, oi, 0);
Alex Bennéeeed56642019-02-15 14:31:13 +00002615}
Blue Swirl0cac1b62012-04-09 16:50:52 +00002616
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002617static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002618 MemOpIdx oi, uintptr_t retaddr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002619{
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002620 return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code);
Richard Henderson2dd92602019-04-25 20:48:57 -07002621}
2622
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002623uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr)
Alex Bennée4cef72d2019-10-21 16:09:10 +01002624{
Richard Henderson9002ffc2021-07-25 12:06:49 -10002625 MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true));
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002626 return full_ldl_code(env, addr, oi, 0);
Alex Bennée4cef72d2019-10-21 16:09:10 +01002627}
2628
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002629static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr,
Richard Henderson9002ffc2021-07-25 12:06:49 -10002630 MemOpIdx oi, uintptr_t retaddr)
Richard Henderson2dd92602019-04-25 20:48:57 -07002631{
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002632 return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code);
Alex Bennéeeed56642019-02-15 14:31:13 +00002633}
Blue Swirl0cac1b62012-04-09 16:50:52 +00002634
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002635uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr)
Alex Bennéeeed56642019-02-15 14:31:13 +00002636{
Frédéric Pétrotfc313c62022-01-06 22:00:51 +01002637 MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true));
Richard Hendersonfc4120a2019-12-11 11:25:10 -08002638 return full_ldq_code(env, addr, oi, 0);
Alex Bennéeeed56642019-02-15 14:31:13 +00002639}
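/*
 * These cpu_ld*_code() wrappers select the instruction-fetch MMU index
 * via cpu_mmu_index(env, true) and pass retaddr == 0: they are called
 * from the translator loop rather than from generated code, so there is
 * no host return address to unwind on a TLB fill.
 */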