/*
 * Common CPU TLB handling
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/main-loop.h"
#include "hw/core/tcg-cpu-ops.h"
#include "exec/exec-all.h"
#include "exec/memory.h"
#include "exec/cpu_ldst.h"
#include "exec/cputlb.h"
#include "exec/memory-internal.h"
#include "exec/ram_addr.h"
#include "tcg/tcg.h"
#include "qemu/error-report.h"
#include "exec/log.h"
#include "exec/helper-proto.h"
#include "qemu/atomic.h"
#include "qemu/atomic128.h"
#include "exec/translate-all.h"
#include "trace/trace-root.h"
#include "tb-hash.h"
#include "internal.h"
#ifdef CONFIG_PLUGIN
#include "qemu/plugin-memory.h"
#endif
#include "tcg/tcg-ldst.h"

/* DEBUG defines, enable DEBUG_TLB_LOG to log to the CPU_LOG_MMU target */
/* #define DEBUG_TLB */
/* #define DEBUG_TLB_LOG */

#ifdef DEBUG_TLB
# define DEBUG_TLB_GATE 1
# ifdef DEBUG_TLB_LOG
#  define DEBUG_TLB_LOG_GATE 1
# else
#  define DEBUG_TLB_LOG_GATE 0
# endif
#else
# define DEBUG_TLB_GATE 0
# define DEBUG_TLB_LOG_GATE 0
#endif

#define tlb_debug(fmt, ...) do { \
    if (DEBUG_TLB_LOG_GATE) { \
        qemu_log_mask(CPU_LOG_MMU, "%s: " fmt, __func__, \
                      ## __VA_ARGS__); \
    } else if (DEBUG_TLB_GATE) { \
        fprintf(stderr, "%s: " fmt, __func__, ## __VA_ARGS__); \
    } \
} while (0)

#define assert_cpu_is_self(cpu) do {                              \
        if (DEBUG_TLB_GATE) {                                     \
            g_assert(!(cpu)->created || qemu_cpu_is_self(cpu));   \
        }                                                         \
    } while (0)

/* run_on_cpu_data.target_ptr should always be big enough for a
 * target_ulong even on 32 bit builds */
QEMU_BUILD_BUG_ON(sizeof(target_ulong) > sizeof(run_on_cpu_data));

/* We currently can't handle more than 16 bits in the MMUIDX bitmask.
 */
QEMU_BUILD_BUG_ON(NB_MMU_MODES > 16);
#define ALL_MMUIDX_BITS ((1 << NB_MMU_MODES) - 1)

static inline size_t tlb_n_entries(CPUTLBDescFast *fast)
{
    return (fast->mask >> CPU_TLB_ENTRY_BITS) + 1;
}

static inline size_t sizeof_tlb(CPUTLBDescFast *fast)
{
    return fast->mask + (1 << CPU_TLB_ENTRY_BITS);
}

static void tlb_window_reset(CPUTLBDesc *desc, int64_t ns,
                             size_t max_entries)
{
    desc->window_begin_ns = ns;
    desc->window_max_entries = max_entries;
}

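/*
 * Clear the TB_JMP_PAGE_SIZE jump-cache slots whose hash bucket corresponds
 * to @page_addr, so that translations overlapping a flushed page can no
 * longer be reached through the jump cache.
 */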
static void tb_jmp_cache_clear_page(CPUState *cpu, target_ulong page_addr)
{
    unsigned int i, i0 = tb_jmp_cache_hash_page(page_addr);

    for (i = 0; i < TB_JMP_PAGE_SIZE; i++) {
        qatomic_set(&cpu->tb_jmp_cache[i0 + i], NULL);
    }
}

static void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr)
{
    /* Discard jump cache entries for any tb which might potentially
       overlap the flushed page. */
    tb_jmp_cache_clear_page(cpu, addr - TARGET_PAGE_SIZE);
    tb_jmp_cache_clear_page(cpu, addr);
}

/**
 * tlb_mmu_resize_locked() - perform TLB resize bookkeeping; resize if necessary
 * @desc: The CPUTLBDesc portion of the TLB
 * @fast: The CPUTLBDescFast portion of the same TLB
 *
 * Called with tlb_lock_held.
 *
 * We have two main constraints when resizing a TLB: (1) we only resize it
 * on a TLB flush (otherwise we'd have to take a perf hit by either rehashing
 * the array or unnecessarily flushing it), which means we do not control how
 * frequently the resizing can occur; (2) we don't have access to the guest's
 * future scheduling decisions, and therefore have to decide the magnitude of
 * the resize based on past observations.
 *
 * In general, a memory-hungry process can benefit greatly from an appropriately
 * sized TLB, since a guest TLB miss is very expensive. This doesn't mean that
 * we just have to make the TLB as large as possible; while an oversized TLB
 * results in minimal TLB miss rates, it also takes longer to be flushed
 * (flushes can be _very_ frequent), and the reduced locality can also hurt
 * performance.
 *
 * To achieve near-optimal performance for all kinds of workloads, we:
 *
 * 1. Aggressively increase the size of the TLB when the use rate of the
 * TLB being flushed is high, since it is likely that in the near future this
 * memory-hungry process will execute again, and its memory hungriness will
 * probably be similar.
 *
 * 2. Slowly reduce the size of the TLB as the use rate declines over a
 * reasonably large time window. The rationale is that if in such a time window
 * we have not observed a high TLB use rate, it is likely that we won't observe
 * it in the near future. In that case, once a time window expires we downsize
 * the TLB to match the maximum use rate observed in the window.
 *
 * 3. Try to keep the maximum use rate in a time window in the 30-70% range,
 * since in that range performance is likely near-optimal. Recall that the TLB
 * is direct mapped, so we want the use rate to be low (or at least not too
 * high), since otherwise we are likely to have a significant amount of
 * conflict misses.
 */
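/*
 * Illustrative numbers (not from the original source, and assuming
 * CPU_TLB_DYN_MAX_BITS permits the larger size): with old_size == 1024 and
 * window_max_entries == 800, the use rate is 800 * 100 / 1024 == 78%, so the
 * TLB doubles to 2048 entries.  With window_max_entries == 150 and an expired
 * window, the rate is 14%, so the TLB shrinks to pow2ceil(150) == 256 entries;
 * the expected use rate 150 * 100 / 256 == 58% stays below 70%, so the target
 * is not doubled (and the CPU_TLB_DYN_MIN_BITS floor still applies).
 */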
static void tlb_mmu_resize_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast,
                                  int64_t now)
{
    size_t old_size = tlb_n_entries(fast);
    size_t rate;
    size_t new_size = old_size;
    int64_t window_len_ms = 100;
    int64_t window_len_ns = window_len_ms * 1000 * 1000;
    bool window_expired = now > desc->window_begin_ns + window_len_ns;

    if (desc->n_used_entries > desc->window_max_entries) {
        desc->window_max_entries = desc->n_used_entries;
    }
    rate = desc->window_max_entries * 100 / old_size;

    if (rate > 70) {
        new_size = MIN(old_size << 1, 1 << CPU_TLB_DYN_MAX_BITS);
    } else if (rate < 30 && window_expired) {
        size_t ceil = pow2ceil(desc->window_max_entries);
        size_t expected_rate = desc->window_max_entries * 100 / ceil;

        /*
         * Avoid undersizing when the max number of entries seen is just below
         * a pow2. For instance, if max_entries == 1025, the expected use rate
         * would be 1025/2048==50%. However, if max_entries == 1023, we'd get
         * 1023/1024==99.9% use rate, so we'd likely end up doubling the size
         * later. Thus, make sure that the expected use rate remains below 70%.
         * (and since we double the size, that means the lowest rate we'd
         * expect to get is 35%, which is still in the 30-70% range where
         * we consider that the size is appropriate.)
         */
        if (expected_rate > 70) {
            ceil *= 2;
        }
        new_size = MAX(ceil, 1 << CPU_TLB_DYN_MIN_BITS);
    }

    if (new_size == old_size) {
        if (window_expired) {
            tlb_window_reset(desc, now, desc->n_used_entries);
        }
        return;
    }

    g_free(fast->table);
    g_free(desc->iotlb);

    tlb_window_reset(desc, now, 0);
    /* desc->n_used_entries is cleared by the caller */
    fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_try_new(CPUTLBEntry, new_size);
    desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);

    /*
     * If the allocations fail, try smaller sizes. We just freed some
     * memory, so going back to half of new_size has a good chance of working.
     * Increased memory pressure elsewhere in the system might cause the
     * allocations to fail though, so we progressively reduce the allocation
     * size, aborting if we cannot even allocate the smallest TLB we support.
     */
    while (fast->table == NULL || desc->iotlb == NULL) {
        if (new_size == (1 << CPU_TLB_DYN_MIN_BITS)) {
            error_report("%s: %s", __func__, strerror(errno));
            abort();
        }
        new_size = MAX(new_size >> 1, 1 << CPU_TLB_DYN_MIN_BITS);
        fast->mask = (new_size - 1) << CPU_TLB_ENTRY_BITS;

        g_free(fast->table);
        g_free(desc->iotlb);
        fast->table = g_try_new(CPUTLBEntry, new_size);
        desc->iotlb = g_try_new(CPUIOTLBEntry, new_size);
    }
}

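/*
 * Reset one TLB's bookkeeping and mark every fast-table and victim-table
 * entry invalid (memset to -1 leaves no address able to match).
 */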
static void tlb_mmu_flush_locked(CPUTLBDesc *desc, CPUTLBDescFast *fast)
{
    desc->n_used_entries = 0;
    desc->large_page_addr = -1;
    desc->large_page_mask = -1;
    desc->vindex = 0;
    memset(fast->table, -1, sizeof_tlb(fast));
    memset(desc->vtable, -1, sizeof(desc->vtable));
}

static void tlb_flush_one_mmuidx_locked(CPUArchState *env, int mmu_idx,
                                        int64_t now)
{
    CPUTLBDesc *desc = &env_tlb(env)->d[mmu_idx];
    CPUTLBDescFast *fast = &env_tlb(env)->f[mmu_idx];

    tlb_mmu_resize_locked(desc, fast, now);
    tlb_mmu_flush_locked(desc, fast);
}

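/* Allocate one TLB at its default dynamic size and start it out empty. */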
static void tlb_mmu_init(CPUTLBDesc *desc, CPUTLBDescFast *fast, int64_t now)
{
    size_t n_entries = 1 << CPU_TLB_DYN_DEFAULT_BITS;

    tlb_window_reset(desc, now, 0);
    desc->n_used_entries = 0;
    fast->mask = (n_entries - 1) << CPU_TLB_ENTRY_BITS;
    fast->table = g_new(CPUTLBEntry, n_entries);
    desc->iotlb = g_new(CPUIOTLBEntry, n_entries);
    tlb_mmu_flush_locked(desc, fast);
}

static inline void tlb_n_used_entries_inc(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries++;
}

static inline void tlb_n_used_entries_dec(CPUArchState *env, uintptr_t mmu_idx)
{
    env_tlb(env)->d[mmu_idx].n_used_entries--;
}

void tlb_init(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int64_t now = get_clock_realtime();
    int i;

    qemu_spin_init(&env_tlb(env)->c.lock);

    /* All tlbs are initialized flushed. */
    env_tlb(env)->c.dirty = 0;

    for (i = 0; i < NB_MMU_MODES; i++) {
        tlb_mmu_init(&env_tlb(env)->d[i], &env_tlb(env)->f[i], now);
    }
}

void tlb_destroy(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    int i;

    qemu_spin_destroy(&env_tlb(env)->c.lock);
    for (i = 0; i < NB_MMU_MODES; i++) {
        CPUTLBDesc *desc = &env_tlb(env)->d[i];
        CPUTLBDescFast *fast = &env_tlb(env)->f[i];

        g_free(fast->table);
        g_free(desc->iotlb);
    }
}

/* flush_all_helper: run fn across all cpus
 *
 * If the wait flag is set then the src cpu's helper will be queued as
 * "safe" work and the loop exited creating a synchronisation point
 * where all queued work will be finished before execution starts
 * again.
 */
static void flush_all_helper(CPUState *src, run_on_cpu_func fn,
                             run_on_cpu_data d)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (cpu != src) {
            async_run_on_cpu(cpu, fn, d);
        }
    }
}

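/* Sum the full/partial/elided flush statistics across all vCPUs. */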
void tlb_flush_counts(size_t *pfull, size_t *ppart, size_t *pelide)
{
    CPUState *cpu;
    size_t full = 0, part = 0, elide = 0;

    CPU_FOREACH(cpu) {
        CPUArchState *env = cpu->env_ptr;

        full += qatomic_read(&env_tlb(env)->c.full_flush_count);
        part += qatomic_read(&env_tlb(env)->c.part_flush_count);
        elide += qatomic_read(&env_tlb(env)->c.elide_flush_count);
    }
    *pfull = full;
    *ppart = part;
    *pelide = elide;
}

static void tlb_flush_by_mmuidx_async_work(CPUState *cpu, run_on_cpu_data data)
{
    CPUArchState *env = cpu->env_ptr;
    uint16_t asked = data.host_int;
    uint16_t all_dirty, work, to_clean;
    int64_t now = get_clock_realtime();

    assert_cpu_is_self(cpu);

    tlb_debug("mmu_idx:0x%04" PRIx16 "\n", asked);

    qemu_spin_lock(&env_tlb(env)->c.lock);

    all_dirty = env_tlb(env)->c.dirty;
    to_clean = asked & all_dirty;
    all_dirty &= ~to_clean;
    env_tlb(env)->c.dirty = all_dirty;

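    /*
     * Only mmu_idx bits that are both requested and dirty are flushed;
     * "work &= work - 1" clears the lowest set bit, so each iteration
     * handles exactly one dirty mmu_idx.
     */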
    for (work = to_clean; work != 0; work &= work - 1) {
        int mmu_idx = ctz32(work);
        tlb_flush_one_mmuidx_locked(env, mmu_idx, now);
    }

    qemu_spin_unlock(&env_tlb(env)->c.lock);

    cpu_tb_jmp_cache_clear(cpu);

    if (to_clean == ALL_MMUIDX_BITS) {
        qatomic_set(&env_tlb(env)->c.full_flush_count,
                    env_tlb(env)->c.full_flush_count + 1);
    } else {
        qatomic_set(&env_tlb(env)->c.part_flush_count,
                    env_tlb(env)->c.part_flush_count + ctpop16(to_clean));
        if (to_clean != asked) {
            qatomic_set(&env_tlb(env)->c.elide_flush_count,
                        env_tlb(env)->c.elide_flush_count +
                        ctpop16(asked & ~to_clean));
        }
    }
}

void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
    tlb_debug("mmu_idx: 0x%" PRIx16 "\n", idxmap);

    if (cpu->created && !qemu_cpu_is_self(cpu)) {
        async_run_on_cpu(cpu, tlb_flush_by_mmuidx_async_work,
                         RUN_ON_CPU_HOST_INT(idxmap));
    } else {
        tlb_flush_by_mmuidx_async_work(cpu, RUN_ON_CPU_HOST_INT(idxmap));
    }
}

void tlb_flush(CPUState *cpu)
{
    tlb_flush_by_mmuidx(cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    fn(src_cpu, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus(src_cpu, ALL_MMUIDX_BITS);
}

void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *src_cpu, uint16_t idxmap)
{
    const run_on_cpu_func fn = tlb_flush_by_mmuidx_async_work;

    tlb_debug("mmu_idx: 0x%"PRIx16"\n", idxmap);

    flush_all_helper(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
    async_safe_run_on_cpu(src_cpu, fn, RUN_ON_CPU_HOST_INT(idxmap));
}

void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
    tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, ALL_MMUIDX_BITS);
}

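/*
 * Return true if @tlb_entry matches @page under @mask for any of the
 * three access types (read, write, code).
 */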
static bool tlb_hit_page_mask_anyprot(CPUTLBEntry *tlb_entry,
                                      target_ulong page, target_ulong mask)
{
    page &= mask;
    mask &= TARGET_PAGE_MASK | TLB_INVALID_MASK;

    return (page == (tlb_entry->addr_read & mask) ||
            page == (tlb_addr_write(tlb_entry) & mask) ||
            page == (tlb_entry->addr_code & mask));
}

static inline bool tlb_hit_page_anyprot(CPUTLBEntry *tlb_entry,
                                        target_ulong page)
{
    return tlb_hit_page_mask_anyprot(tlb_entry, page, -1);
}

/**
 * tlb_entry_is_empty - return true if the entry is not in use
 * @te: pointer to CPUTLBEntry
 */
static inline bool tlb_entry_is_empty(const CPUTLBEntry *te)
{
    return te->addr_read == -1 && te->addr_write == -1 && te->addr_code == -1;
}

/* Called with tlb_c.lock held */
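/* Returns true if the entry was actually flushed, so that callers can
   keep n_used_entries in sync. */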
static bool tlb_flush_entry_mask_locked(CPUTLBEntry *tlb_entry,
                                        target_ulong page,
                                        target_ulong mask)
{
    if (tlb_hit_page_mask_anyprot(tlb_entry, page, mask)) {
        memset(tlb_entry, -1, sizeof(*tlb_entry));
        return true;
    }
    return false;
}

static inline bool tlb_flush_entry_locked(CPUTLBEntry *tlb_entry,
                                          target_ulong page)
{
    return tlb_flush_entry_mask_locked(tlb_entry, page, -1);
}

/* Called with tlb_c.lock held */
static void tlb_flush_vtlb_page_mask_locked(CPUArchState *env, int mmu_idx,
                                            target_ulong page,
                                            target_ulong mask)
{
    CPUTLBDesc *d = &env_tlb(env)->d[mmu_idx];
    int k;

    assert_cpu_is_self(env_cpu(env));
    for (k = 0; k < CPU_VTLB_SIZE; k++) {
        if (tlb_flush_entry_mask_locked(&d->vtable[k], page, mask)) {
            tlb_n_used_entries_dec(env, mmu_idx);
        }
    }
}

static inline void tlb_flush_vtlb_page_locked(CPUArchState *env, int mmu_idx,
                                              target_ulong page)
{
    tlb_flush_vtlb_page_mask_locked(env, mmu_idx, page, -1);
}

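/*
 * Flush one page from one mmu index. If the page lies within the currently
 * tracked large-page region we cannot flush it precisely, so fall back to
 * flushing the whole mmu index; otherwise evict the matching fast-table and
 * victim-table entries.
 */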
static void tlb_flush_page_locked(CPUArchState *env, int midx,
                                  target_ulong page)
{
    target_ulong lp_addr = env_tlb(env)->d[midx].large_page_addr;
    target_ulong lp_mask = env_tlb(env)->d[midx].large_page_mask;

    /* Check if we need to flush due to large pages. */
    if ((page & lp_mask) == lp_addr) {
        tlb_debug("forcing full flush midx %d ("
                  TARGET_FMT_lx "/" TARGET_FMT_lx ")\n",
                  midx, lp_addr, lp_mask);
        tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime());
    } else {
        if (tlb_flush_entry_locked(tlb_entry(env, midx, page), page)) {
            tlb_n_used_entries_dec(env, midx);
        }
        tlb_flush_vtlb_page_locked(env, midx, page);
    }
}

/**
 * tlb_flush_page_by_mmuidx_async_0:
 * @cpu: cpu on which to flush
 * @addr: page of virtual address to flush
 * @idxmap: set of mmu_idx to flush
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, flush one page
 * at @addr from the tlbs indicated by @idxmap from @cpu.
 */
static void tlb_flush_page_by_mmuidx_async_0(CPUState *cpu,
                                             target_ulong addr,
                                             uint16_t idxmap)
{
    CPUArchState *env = cpu->env_ptr;
    int mmu_idx;

    assert_cpu_is_self(cpu);

    tlb_debug("page addr:" TARGET_FMT_lx " mmu_map:0x%x\n", addr, idxmap);

    qemu_spin_lock(&env_tlb(env)->c.lock);
    for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) {
        if ((idxmap >> mmu_idx) & 1) {
            tlb_flush_page_locked(env, mmu_idx, addr);
        }
    }
    qemu_spin_unlock(&env_tlb(env)->c.lock);

    tb_flush_jmp_cache(cpu, addr);
}

/**
 * tlb_flush_page_by_mmuidx_async_1:
 * @cpu: cpu on which to flush
 * @data: encoded addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The idxmap parameter is encoded in the page
 * offset of the target_ptr field.  This limits the set of mmu_idx
 * that can be passed via this method.
 */
static void tlb_flush_page_by_mmuidx_async_1(CPUState *cpu,
                                             run_on_cpu_data data)
{
    target_ulong addr_and_idxmap = (target_ulong) data.target_ptr;
    target_ulong addr = addr_and_idxmap & TARGET_PAGE_MASK;
    uint16_t idxmap = addr_and_idxmap & ~TARGET_PAGE_MASK;

    tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
}

typedef struct {
    target_ulong addr;
    uint16_t idxmap;
} TLBFlushPageByMMUIdxData;

/**
 * tlb_flush_page_by_mmuidx_async_2:
 * @cpu: cpu on which to flush
 * @data: allocated addr + idxmap
 *
 * Helper for tlb_flush_page_by_mmuidx and friends, called through
 * async_run_on_cpu.  The addr+idxmap parameters are stored in a
 * TLBFlushPageByMMUIdxData structure that has been allocated
 * specifically for this helper.  Free the structure when done.
 */
static void tlb_flush_page_by_mmuidx_async_2(CPUState *cpu,
                                             run_on_cpu_data data)
{
    TLBFlushPageByMMUIdxData *d = data.host_ptr;

    tlb_flush_page_by_mmuidx_async_0(cpu, d->addr, d->idxmap);
    g_free(d);
}

void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr, uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%" PRIx16 "\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    if (qemu_cpu_is_self(cpu)) {
        tlb_flush_page_by_mmuidx_async_0(cpu, addr, idxmap);
    } else if (idxmap < TARGET_PAGE_SIZE) {
        /*
         * Most targets have only a few mmu_idx.  In the case where
         * we can stuff idxmap into the low TARGET_PAGE_BITS, avoid
         * allocating memory for this operation.
         */
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        TLBFlushPageByMMUIdxData *d = g_new(TLBFlushPageByMMUIdxData, 1);

        /* Otherwise allocate a structure, freed by the worker. */
        d->addr = addr;
        d->idxmap = idxmap;
        async_run_on_cpu(cpu, tlb_flush_page_by_mmuidx_async_2,
                         RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
    tlb_flush_page_by_mmuidx(cpu, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus(CPUState *src_cpu, target_ulong addr,
                                       uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                TLBFlushPageByMMUIdxData *d
                    = g_new(TLBFlushPageByMMUIdxData, 1);

                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }
    }

    tlb_flush_page_by_mmuidx_async_0(src_cpu, addr, idxmap);
}

void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus(src, addr, ALL_MMUIDX_BITS);
}

void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *src_cpu,
                                              target_ulong addr,
                                              uint16_t idxmap)
{
    tlb_debug("addr: "TARGET_FMT_lx" mmu_idx:%"PRIx16"\n", addr, idxmap);

    /* This should already be page aligned */
    addr &= TARGET_PAGE_MASK;

    /*
     * Allocate memory to hold addr+idxmap only when needed.
     * See tlb_flush_page_by_mmuidx for details.
     */
    if (idxmap < TARGET_PAGE_SIZE) {
        flush_all_helper(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                         RUN_ON_CPU_TARGET_PTR(addr | idxmap));
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_1,
                              RUN_ON_CPU_TARGET_PTR(addr | idxmap));
    } else {
        CPUState *dst_cpu;
        TLBFlushPageByMMUIdxData *d;

        /* Allocate a separate data block for each destination cpu. */
        CPU_FOREACH(dst_cpu) {
            if (dst_cpu != src_cpu) {
                d = g_new(TLBFlushPageByMMUIdxData, 1);
                d->addr = addr;
                d->idxmap = idxmap;
                async_run_on_cpu(dst_cpu, tlb_flush_page_by_mmuidx_async_2,
                                 RUN_ON_CPU_HOST_PTR(d));
            }
        }

        d = g_new(TLBFlushPageByMMUIdxData, 1);
        d->addr = addr;
        d->idxmap = idxmap;
        async_safe_run_on_cpu(src_cpu, tlb_flush_page_by_mmuidx_async_2,
                              RUN_ON_CPU_HOST_PTR(d));
    }
}

void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr)
{
    tlb_flush_page_by_mmuidx_all_cpus_synced(src, addr, ALL_MMUIDX_BITS);
}

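/*
 * Flush the [addr, addr + len) range of pages from mmu index @midx,
 * comparing addresses only on their low @bits bits. Falls back to a full
 * flush of the mmu index when that would be cheaper, or when the range
 * overlaps the tracked large-page region.
 */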
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 710 | static void tlb_flush_range_locked(CPUArchState *env, int midx, |
| 711 | target_ulong addr, target_ulong len, |
| 712 | unsigned bits) |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 713 | { |
| 714 | CPUTLBDesc *d = &env_tlb(env)->d[midx]; |
| 715 | CPUTLBDescFast *f = &env_tlb(env)->f[midx]; |
| 716 | target_ulong mask = MAKE_64BIT_MASK(0, bits); |
| 717 | |
| 718 | /* |
| 719 | * If @bits is smaller than the tlb size, there may be multiple entries |
| 720 | * within the TLB; otherwise all addresses that match under @mask hit |
| 721 | * the same TLB entry. |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 722 | * TODO: Perhaps allow bits to be a few bits less than the size. |
| 723 | * For now, just flush the entire TLB. |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 724 | * |
| 725 | * If @len is larger than the tlb size, then it will take longer to |
| 726 | * test all of the entries in the TLB than it will to flush it all. |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 727 | */ |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 728 | if (mask < f->mask || len > f->mask) { |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 729 | tlb_debug("forcing full flush midx %d (" |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 730 | TARGET_FMT_lx "/" TARGET_FMT_lx "+" TARGET_FMT_lx ")\n", |
| 731 | midx, addr, mask, len); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 732 | tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); |
| 733 | return; |
| 734 | } |
| 735 | |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 736 | /* |
| 737 | * Check if we need to flush due to large pages. |
| 738 | * Because large_page_mask contains all 1's from the msb, |
| 739 | * we only need to test the end of the range. |
| 740 | */ |
| 741 | if (((addr + len - 1) & d->large_page_mask) == d->large_page_addr) { |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 742 | tlb_debug("forcing full flush midx %d (" |
| 743 | TARGET_FMT_lx "/" TARGET_FMT_lx ")\n", |
| 744 | midx, d->large_page_addr, d->large_page_mask); |
| 745 | tlb_flush_one_mmuidx_locked(env, midx, get_clock_realtime()); |
| 746 | return; |
| 747 | } |
| 748 | |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 749 | for (target_ulong i = 0; i < len; i += TARGET_PAGE_SIZE) { |
| 750 | target_ulong page = addr + i; |
| 751 | CPUTLBEntry *entry = tlb_entry(env, midx, page); |
| 752 | |
| 753 | if (tlb_flush_entry_mask_locked(entry, page, mask)) { |
| 754 | tlb_n_used_entries_dec(env, midx); |
| 755 | } |
| 756 | tlb_flush_vtlb_page_mask_locked(env, midx, page, mask); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 757 | } |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 758 | } |
| 759 | |
| 760 | typedef struct { |
| 761 | target_ulong addr; |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 762 | target_ulong len; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 763 | uint16_t idxmap; |
| 764 | uint16_t bits; |
Richard Henderson | 3960a59 | 2021-05-09 17:16:12 +0200 | [diff] [blame] | 765 | } TLBFlushRangeData; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 766 | |
Richard Henderson | 6be48e4 | 2021-05-09 17:16:16 +0200 | [diff] [blame] | 767 | static void tlb_flush_range_by_mmuidx_async_0(CPUState *cpu, |
| 768 | TLBFlushRangeData d) |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 769 | { |
| 770 | CPUArchState *env = cpu->env_ptr; |
| 771 | int mmu_idx; |
| 772 | |
| 773 | assert_cpu_is_self(cpu); |
| 774 | |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 775 | tlb_debug("range:" TARGET_FMT_lx "/%u+" TARGET_FMT_lx " mmu_map:0x%x\n", |
| 776 | d.addr, d.bits, d.len, d.idxmap); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 777 | |
| 778 | qemu_spin_lock(&env_tlb(env)->c.lock); |
| 779 | for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { |
| 780 | if ((d.idxmap >> mmu_idx) & 1) { |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 781 | tlb_flush_range_locked(env, mmu_idx, d.addr, d.len, d.bits); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 782 | } |
| 783 | } |
| 784 | qemu_spin_unlock(&env_tlb(env)->c.lock); |
| 785 | |
Idan Horowitz | cfc2a2d | 2022-01-10 18:47:53 +0200 | [diff] [blame] | 786 | /* |
| 787 | * If the length is larger than the jump cache size, then it will take |
| 788 | * longer to clear each entry individually than it will to clear it all. |
| 789 | */ |
| 790 | if (d.len >= (TARGET_PAGE_SIZE * TB_JMP_CACHE_SIZE)) { |
| 791 | cpu_tb_jmp_cache_clear(cpu); |
| 792 | return; |
| 793 | } |
| 794 | |
Richard Henderson | 3c4ddec | 2021-05-09 17:16:11 +0200 | [diff] [blame] | 795 | for (target_ulong i = 0; i < d.len; i += TARGET_PAGE_SIZE) { |
| 796 | tb_flush_jmp_cache(cpu, d.addr + i); |
| 797 | } |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 798 | } |
| 799 | |
Richard Henderson | 206a583 | 2021-05-09 17:16:17 +0200 | [diff] [blame] | 800 | static void tlb_flush_range_by_mmuidx_async_1(CPUState *cpu, |
| 801 | run_on_cpu_data data) |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 802 | { |
Richard Henderson | 3960a59 | 2021-05-09 17:16:12 +0200 | [diff] [blame] | 803 | TLBFlushRangeData *d = data.host_ptr; |
Richard Henderson | 6be48e4 | 2021-05-09 17:16:16 +0200 | [diff] [blame] | 804 | tlb_flush_range_by_mmuidx_async_0(cpu, *d); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 805 | g_free(d); |
| 806 | } |
| 807 | |
Richard Henderson | e5b1921 | 2021-05-09 17:16:13 +0200 | [diff] [blame] | 808 | void tlb_flush_range_by_mmuidx(CPUState *cpu, target_ulong addr, |
| 809 | target_ulong len, uint16_t idxmap, |
| 810 | unsigned bits) |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 811 | { |
Richard Henderson | 3960a59 | 2021-05-09 17:16:12 +0200 | [diff] [blame] | 812 | TLBFlushRangeData d; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 813 | |
Richard Henderson | e5b1921 | 2021-05-09 17:16:13 +0200 | [diff] [blame] | 814 | /* |
| 815 | * If all bits are significant, and len is small, |
| 816 | * this devolves to tlb_flush_page. |
| 817 | */ |
| 818 | if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 819 | tlb_flush_page_by_mmuidx(cpu, addr, idxmap); |
| 820 | return; |
| 821 | } |
| 822 | /* If no page bits are significant, this devolves to tlb_flush. */ |
| 823 | if (bits < TARGET_PAGE_BITS) { |
| 824 | tlb_flush_by_mmuidx(cpu, idxmap); |
| 825 | return; |
| 826 | } |
| 827 | |
| 828 | /* This should already be page aligned */ |
| 829 | d.addr = addr & TARGET_PAGE_MASK; |
Richard Henderson | e5b1921 | 2021-05-09 17:16:13 +0200 | [diff] [blame] | 830 | d.len = len; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 831 | d.idxmap = idxmap; |
| 832 | d.bits = bits; |
| 833 | |
| 834 | if (qemu_cpu_is_self(cpu)) { |
Richard Henderson | 6be48e4 | 2021-05-09 17:16:16 +0200 | [diff] [blame] | 835 | tlb_flush_range_by_mmuidx_async_0(cpu, d); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 836 | } else { |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 837 | /* Otherwise allocate a structure, freed by the worker. */ |
Richard Henderson | 3960a59 | 2021-05-09 17:16:12 +0200 | [diff] [blame] | 838 | TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); |
Richard Henderson | 206a583 | 2021-05-09 17:16:17 +0200 | [diff] [blame] | 839 | async_run_on_cpu(cpu, tlb_flush_range_by_mmuidx_async_1, |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 840 | RUN_ON_CPU_HOST_PTR(p)); |
| 841 | } |
| 842 | } |
| 843 | |
Richard Henderson | e5b1921 | 2021-05-09 17:16:13 +0200 | [diff] [blame] | 844 | void tlb_flush_page_bits_by_mmuidx(CPUState *cpu, target_ulong addr, |
| 845 | uint16_t idxmap, unsigned bits) |
| 846 | { |
| 847 | tlb_flush_range_by_mmuidx(cpu, addr, TARGET_PAGE_SIZE, idxmap, bits); |
| 848 | } |
| 849 | |
Richard Henderson | 600b819 | 2021-05-09 17:16:14 +0200 | [diff] [blame] | 850 | void tlb_flush_range_by_mmuidx_all_cpus(CPUState *src_cpu, |
| 851 | target_ulong addr, target_ulong len, |
| 852 | uint16_t idxmap, unsigned bits) |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 853 | { |
Richard Henderson | 3960a59 | 2021-05-09 17:16:12 +0200 | [diff] [blame] | 854 | TLBFlushRangeData d; |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 855 | CPUState *dst_cpu; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 856 | |
Richard Henderson | 600b819 | 2021-05-09 17:16:14 +0200 | [diff] [blame] | 857 | /* |
| 858 | * If all bits are significant, and len is small, |
| 859 | * this devolves to tlb_flush_page. |
| 860 | */ |
| 861 | if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 862 | tlb_flush_page_by_mmuidx_all_cpus(src_cpu, addr, idxmap); |
| 863 | return; |
| 864 | } |
| 865 | /* If no page bits are significant, this devolves to tlb_flush. */ |
| 866 | if (bits < TARGET_PAGE_BITS) { |
| 867 | tlb_flush_by_mmuidx_all_cpus(src_cpu, idxmap); |
| 868 | return; |
| 869 | } |
| 870 | |
| 871 | /* This should already be page aligned */ |
| 872 | d.addr = addr & TARGET_PAGE_MASK; |
Richard Henderson | 600b819 | 2021-05-09 17:16:14 +0200 | [diff] [blame] | 873 | d.len = len; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 874 | d.idxmap = idxmap; |
| 875 | d.bits = bits; |
| 876 | |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 877 | /* Allocate a separate data block for each destination cpu. */ |
| 878 | CPU_FOREACH(dst_cpu) { |
| 879 | if (dst_cpu != src_cpu) { |
| 880 | TLBFlushRangeData *p = g_memdup(&d, sizeof(d)); |
| 881 | async_run_on_cpu(dst_cpu, |
Richard Henderson | 206a583 | 2021-05-09 17:16:17 +0200 | [diff] [blame] | 882 | tlb_flush_range_by_mmuidx_async_1, |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 883 | RUN_ON_CPU_HOST_PTR(p)); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 884 | } |
| 885 | } |
| 886 | |
Richard Henderson | 6be48e4 | 2021-05-09 17:16:16 +0200 | [diff] [blame] | 887 | tlb_flush_range_by_mmuidx_async_0(src_cpu, d); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 888 | } |
| 889 | |
Richard Henderson | 600b819 | 2021-05-09 17:16:14 +0200 | [diff] [blame] | 890 | void tlb_flush_page_bits_by_mmuidx_all_cpus(CPUState *src_cpu, |
| 891 | target_ulong addr, |
| 892 | uint16_t idxmap, unsigned bits) |
| 893 | { |
| 894 | tlb_flush_range_by_mmuidx_all_cpus(src_cpu, addr, TARGET_PAGE_SIZE, |
| 895 | idxmap, bits); |
| 896 | } |
| 897 | |
Richard Henderson | c13b27d | 2021-05-09 17:16:15 +0200 | [diff] [blame] | 898 | void tlb_flush_range_by_mmuidx_all_cpus_synced(CPUState *src_cpu, |
| 899 | target_ulong addr, |
| 900 | target_ulong len, |
| 901 | uint16_t idxmap, |
| 902 | unsigned bits) |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 903 | { |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 904 | TLBFlushRangeData d, *p; |
| 905 | CPUState *dst_cpu; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 906 | |
Richard Henderson | c13b27d | 2021-05-09 17:16:15 +0200 | [diff] [blame] | 907 | /* |
| 908 | * If all bits are significant, and len is small, |
| 909 | * this devolves to tlb_flush_page. |
| 910 | */ |
| 911 | if (bits >= TARGET_LONG_BITS && len <= TARGET_PAGE_SIZE) { |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 912 | tlb_flush_page_by_mmuidx_all_cpus_synced(src_cpu, addr, idxmap); |
| 913 | return; |
| 914 | } |
| 915 | /* If no page bits are significant, this devolves to tlb_flush. */ |
| 916 | if (bits < TARGET_PAGE_BITS) { |
| 917 | tlb_flush_by_mmuidx_all_cpus_synced(src_cpu, idxmap); |
| 918 | return; |
| 919 | } |
| 920 | |
| 921 | /* This should already be page aligned */ |
| 922 | d.addr = addr & TARGET_PAGE_MASK; |
Richard Henderson | c13b27d | 2021-05-09 17:16:15 +0200 | [diff] [blame] | 923 | d.len = len; |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 924 | d.idxmap = idxmap; |
| 925 | d.bits = bits; |
| 926 | |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 927 | /* Allocate a separate data block for each destination cpu. */ |
| 928 | CPU_FOREACH(dst_cpu) { |
| 929 | if (dst_cpu != src_cpu) { |
| 930 | p = g_memdup(&d, sizeof(d)); |
Richard Henderson | 206a583 | 2021-05-09 17:16:17 +0200 | [diff] [blame] | 931 | async_run_on_cpu(dst_cpu, tlb_flush_range_by_mmuidx_async_1, |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 932 | RUN_ON_CPU_HOST_PTR(p)); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 933 | } |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 934 | } |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 935 | |
| 936 | p = g_memdup(&d, sizeof(d)); |
Richard Henderson | 206a583 | 2021-05-09 17:16:17 +0200 | [diff] [blame] | 937 | async_safe_run_on_cpu(src_cpu, tlb_flush_range_by_mmuidx_async_1, |
Richard Henderson | d34e4d1 | 2021-05-09 17:16:18 +0200 | [diff] [blame] | 938 | RUN_ON_CPU_HOST_PTR(p)); |
Richard Henderson | 3ab6e68 | 2020-10-16 14:07:53 -0700 | [diff] [blame] | 939 | } |
| 940 | |
Richard Henderson | c13b27d | 2021-05-09 17:16:15 +0200 | [diff] [blame] | 941 | void tlb_flush_page_bits_by_mmuidx_all_cpus_synced(CPUState *src_cpu, |
| 942 | target_ulong addr, |
| 943 | uint16_t idxmap, |
| 944 | unsigned bits) |
| 945 | { |
| 946 | tlb_flush_range_by_mmuidx_all_cpus_synced(src_cpu, addr, TARGET_PAGE_SIZE, |
| 947 | idxmap, bits); |
| 948 | } |
| 949 | |
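As a usage sketch only (the helper name, the instruction semantics, and the 64KiB/48-bit values are assumptions, not taken from any real target), a front end implementing a ranged TLB-invalidate operation might drive the synced variant above like this:

/*
 * Hypothetical caller: invalidate a 64KiB virtual range in MMU index 0
 * on every vCPU, treating only the low 48 address bits as significant.
 * A small len with all bits significant devolves to a page flush, and
 * bits below TARGET_PAGE_BITS devolves to a full flush, as shown above.
 */
static void toy_tlbi_range_sync(CPUState *cs, target_ulong va)
{
    tlb_flush_range_by_mmuidx_all_cpus_synced(cs, va & TARGET_PAGE_MASK,
                                              0x10000, 1 << 0, 48);
}
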
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 950 | /* update the TLBs so that writes to code in the RAM page 'ram_addr' |
| 951 | can be detected */ |
| 952 | void tlb_protect_code(ram_addr_t ram_addr) |
| 953 | { |
Stefan Hajnoczi | 03eebc9 | 2014-12-02 11:23:18 +0000 | [diff] [blame] | 954 | cpu_physical_memory_test_and_clear_dirty(ram_addr, TARGET_PAGE_SIZE, |
| 955 | DIRTY_MEMORY_CODE); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 956 | } |
| 957 | |
| 958 | /* update the TLB so that writes in the physical RAM page 'ram_addr' are no longer |
| 959 | tested for self-modifying code */ |
Paolo Bonzini | 9564f52 | 2015-04-22 14:24:54 +0200 | [diff] [blame] | 960 | void tlb_unprotect_code(ram_addr_t ram_addr) |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 961 | { |
Juan Quintela | 5215919 | 2013-10-08 12:44:04 +0200 | [diff] [blame] | 962 | cpu_physical_memory_set_dirty_flag(ram_addr, DIRTY_MEMORY_CODE); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 963 | } |
| 964 | |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 965 | |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 966 | /* |
| 967 | * Dirty write flag handling |
| 968 | * |
| 969 | * When the TCG code writes to a location it looks up the address in |
| 970 | * the TLB and uses that data to compute the final address. If any of |
| 971 | * the lower bits of the address are set then the slow path is forced. |
| 972 | * There are a number of reasons to do this but for normal RAM the |
| 973 | * most usual is detecting writes to code regions which may invalidate |
| 974 | * generated code. |
| 975 | * |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 976 | * Other vCPUs might be reading their TLBs during guest execution, so we update |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 977 | * te->addr_write with qatomic_set. We don't need to worry about this for |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 978 | * oversized guests as MTTCG is disabled for them. |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 979 | * |
Richard Henderson | 53d2845 | 2018-10-23 03:57:11 +0100 | [diff] [blame] | 980 | * Called with tlb_c.lock held. |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 981 | */ |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 982 | static void tlb_reset_dirty_range_locked(CPUTLBEntry *tlb_entry, |
| 983 | uintptr_t start, uintptr_t length) |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 984 | { |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 985 | uintptr_t addr = tlb_entry->addr_write; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 986 | |
Richard Henderson | 7b0d792 | 2019-09-19 17:54:10 -0700 | [diff] [blame] | 987 | if ((addr & (TLB_INVALID_MASK | TLB_MMIO | |
| 988 | TLB_DISCARD_WRITE | TLB_NOTDIRTY)) == 0) { |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 989 | addr &= TARGET_PAGE_MASK; |
| 990 | addr += tlb_entry->addend; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 991 | if ((addr - start) < length) { |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 992 | #if TCG_OVERSIZED_GUEST |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 993 | tlb_entry->addr_write |= TLB_NOTDIRTY; |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 994 | #else |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 995 | qatomic_set(&tlb_entry->addr_write, |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 996 | tlb_entry->addr_write | TLB_NOTDIRTY); |
| 997 | #endif |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 998 | } |
| 999 | } |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1000 | } |
| 1001 | |
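To make the mechanism above concrete, here is a deliberately simplified model (an assumption for illustration, not the TCG backend code) of why folding TLB_NOTDIRTY into addr_write forces stores onto the slow path:

/*
 * Toy model of the generated store fast path: the guest address, masked
 * to its page, is compared with the cached comparator.  A clean, directly
 * writable RAM page stores just the page address and matches; once a low
 * flag bit such as TLB_NOTDIRTY is set in the entry, the comparison fails
 * and the store falls back to the slow-path helper, which can then do
 * dirty tracking (and clear the flag) before completing the access.
 */
static inline bool toy_store_fast_path_hits(uint64_t comparator,
                                            uint64_t guest_addr,
                                            uint64_t page_mask)
{
    return (guest_addr & page_mask) == comparator;
}
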
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1002 | /* |
Richard Henderson | 53d2845 | 2018-10-23 03:57:11 +0100 | [diff] [blame] | 1003 | * Called with tlb_c.lock held. |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1004 | * Called only from the vCPU context, i.e. the TLB's owner thread. |
| 1005 | */ |
| 1006 | static inline void copy_tlb_helper_locked(CPUTLBEntry *d, const CPUTLBEntry *s) |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1007 | { |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1008 | *d = *s; |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1009 | } |
| 1010 | |
| 1011 | /* This is a cross vCPU call (i.e. another vCPU resetting the flags of |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1012 | * the target vCPU). |
Richard Henderson | 53d2845 | 2018-10-23 03:57:11 +0100 | [diff] [blame] | 1013 | * We must take tlb_c.lock to avoid racing with another vCPU update. The only |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1014 | * thing actually updated is the target TLB entry ->addr_write flags. |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1015 | */ |
Peter Crosthwaite | 9a13565 | 2015-09-10 22:39:41 -0700 | [diff] [blame] | 1016 | void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length) |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1017 | { |
| 1018 | CPUArchState *env; |
| 1019 | |
Peter Crosthwaite | 9a13565 | 2015-09-10 22:39:41 -0700 | [diff] [blame] | 1020 | int mmu_idx; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1021 | |
Peter Crosthwaite | 9a13565 | 2015-09-10 22:39:41 -0700 | [diff] [blame] | 1022 | env = cpu->env_ptr; |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1023 | qemu_spin_lock(&env_tlb(env)->c.lock); |
Peter Crosthwaite | 9a13565 | 2015-09-10 22:39:41 -0700 | [diff] [blame] | 1024 | for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { |
| 1025 | unsigned int i; |
Richard Henderson | 722a1c1 | 2019-12-07 11:47:41 -0800 | [diff] [blame] | 1026 | unsigned int n = tlb_n_entries(&env_tlb(env)->f[mmu_idx]); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1027 | |
Emilio G. Cota | 86e1eff | 2019-01-16 12:01:13 -0500 | [diff] [blame] | 1028 | for (i = 0; i < n; i++) { |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1029 | tlb_reset_dirty_range_locked(&env_tlb(env)->f[mmu_idx].table[i], |
| 1030 | start1, length); |
Peter Crosthwaite | 9a13565 | 2015-09-10 22:39:41 -0700 | [diff] [blame] | 1031 | } |
Xin Tong | 88e89a5 | 2014-08-04 20:35:23 -0500 | [diff] [blame] | 1032 | |
Peter Crosthwaite | 9a13565 | 2015-09-10 22:39:41 -0700 | [diff] [blame] | 1033 | for (i = 0; i < CPU_VTLB_SIZE; i++) { |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1034 | tlb_reset_dirty_range_locked(&env_tlb(env)->d[mmu_idx].vtable[i], |
| 1035 | start1, length); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1036 | } |
| 1037 | } |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1038 | qemu_spin_unlock(&env_tlb(env)->c.lock); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1039 | } |
| 1040 | |
Richard Henderson | 53d2845 | 2018-10-23 03:57:11 +0100 | [diff] [blame] | 1041 | /* Called with tlb_c.lock held */ |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1042 | static inline void tlb_set_dirty1_locked(CPUTLBEntry *tlb_entry, |
| 1043 | target_ulong vaddr) |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1044 | { |
| 1045 | if (tlb_entry->addr_write == (vaddr | TLB_NOTDIRTY)) { |
| 1046 | tlb_entry->addr_write = vaddr; |
| 1047 | } |
| 1048 | } |
| 1049 | |
| 1050 | /* update the TLB corresponding to virtual page vaddr |
| 1051 | so that it is no longer dirty */ |
Peter Crosthwaite | bcae01e | 2015-09-10 22:39:42 -0700 | [diff] [blame] | 1052 | void tlb_set_dirty(CPUState *cpu, target_ulong vaddr) |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1053 | { |
Peter Crosthwaite | bcae01e | 2015-09-10 22:39:42 -0700 | [diff] [blame] | 1054 | CPUArchState *env = cpu->env_ptr; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1055 | int mmu_idx; |
| 1056 | |
Alex Bennée | f0aff0f | 2017-02-23 18:29:16 +0000 | [diff] [blame] | 1057 | assert_cpu_is_self(cpu); |
| 1058 | |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1059 | vaddr &= TARGET_PAGE_MASK; |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1060 | qemu_spin_lock(&env_tlb(env)->c.lock); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1061 | for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { |
Richard Henderson | 383beda | 2018-10-09 13:51:25 -0400 | [diff] [blame] | 1062 | tlb_set_dirty1_locked(tlb_entry(env, mmu_idx, vaddr), vaddr); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1063 | } |
Xin Tong | 88e89a5 | 2014-08-04 20:35:23 -0500 | [diff] [blame] | 1064 | |
| 1065 | for (mmu_idx = 0; mmu_idx < NB_MMU_MODES; mmu_idx++) { |
| 1066 | int k; |
| 1067 | for (k = 0; k < CPU_VTLB_SIZE; k++) { |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1068 | tlb_set_dirty1_locked(&env_tlb(env)->d[mmu_idx].vtable[k], vaddr); |
Xin Tong | 88e89a5 | 2014-08-04 20:35:23 -0500 | [diff] [blame] | 1069 | } |
| 1070 | } |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1071 | qemu_spin_unlock(&env_tlb(env)->c.lock); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1072 | } |
| 1073 | |
| 1074 | /* Our TLB does not support large pages, so remember the area covered by |
| 1075 | large pages and trigger a full TLB flush if these are invalidated. */ |
Richard Henderson | 1308e02 | 2018-10-17 11:48:40 -0700 | [diff] [blame] | 1076 | static void tlb_add_large_page(CPUArchState *env, int mmu_idx, |
| 1077 | target_ulong vaddr, target_ulong size) |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1078 | { |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1079 | target_ulong lp_addr = env_tlb(env)->d[mmu_idx].large_page_addr; |
Richard Henderson | 1308e02 | 2018-10-17 11:48:40 -0700 | [diff] [blame] | 1080 | target_ulong lp_mask = ~(size - 1); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1081 | |
Richard Henderson | 1308e02 | 2018-10-17 11:48:40 -0700 | [diff] [blame] | 1082 | if (lp_addr == (target_ulong)-1) { |
| 1083 | /* No previous large page. */ |
| 1084 | lp_addr = vaddr; |
| 1085 | } else { |
| 1086 | /* Extend the existing region to include the new page. |
| 1087 | This is a compromise between unnecessary flushes and |
| 1088 | the cost of maintaining a full variable size TLB. */ |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1089 | lp_mask &= env_tlb(env)->d[mmu_idx].large_page_mask; |
Richard Henderson | 1308e02 | 2018-10-17 11:48:40 -0700 | [diff] [blame] | 1090 | while (((lp_addr ^ vaddr) & lp_mask) != 0) { |
| 1091 | lp_mask <<= 1; |
| 1092 | } |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1093 | } |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1094 | env_tlb(env)->d[mmu_idx].large_page_addr = lp_addr & lp_mask; |
| 1095 | env_tlb(env)->d[mmu_idx].large_page_mask = lp_mask; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1096 | } |
| 1097 | |
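A small standalone illustration (addresses and sizes invented for the example) of how the mask-widening loop above coalesces two tracked large pages into one covering region:

/*
 * Compilable sketch: two 2MiB pages at 0x40200000 and 0x40600000 end up
 * tracked as a single 8MiB region based at 0x40000000, which is the range
 * a later large-page flush check would consider.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint64_t lp_addr = 0x40200000;                 /* previously tracked page */
    uint64_t lp_mask = ~(UINT64_C(0x200000) - 1);  /* 2MiB large-page mask */
    uint64_t vaddr   = 0x40600000;                 /* newly added large page */

    /* Same widening loop as tlb_add_large_page above. */
    while (((lp_addr ^ vaddr) & lp_mask) != 0) {
        lp_mask <<= 1;
    }
    printf("region base=%#llx mask=%#llx\n",
           (unsigned long long)(lp_addr & lp_mask),
           (unsigned long long)lp_mask);
    return 0;
}
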
| 1098 | /* Add a new TLB entry. At most one entry for a given virtual address |
Paolo Bonzini | 79e2b9a | 2015-01-21 12:09:14 +0100 | [diff] [blame] | 1099 | * is permitted. Only a single TARGET_PAGE_SIZE region is mapped; the |
| 1100 | * supplied size is only used by tlb_flush_page. |
| 1101 | * |
| 1102 | * Called from TCG-generated code, which is under an RCU read-side |
| 1103 | * critical section. |
| 1104 | */ |
Peter Maydell | fadc1cb | 2015-04-26 16:49:24 +0100 | [diff] [blame] | 1105 | void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, |
| 1106 | hwaddr paddr, MemTxAttrs attrs, int prot, |
| 1107 | int mmu_idx, target_ulong size) |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1108 | { |
Andreas Färber | 0c591eb | 2013-09-03 13:59:37 +0200 | [diff] [blame] | 1109 | CPUArchState *env = cpu->env_ptr; |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1110 | CPUTLB *tlb = env_tlb(env); |
| 1111 | CPUTLBDesc *desc = &tlb->d[mmu_idx]; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1112 | MemoryRegionSection *section; |
| 1113 | unsigned int index; |
| 1114 | target_ulong address; |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1115 | target_ulong write_address; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1116 | uintptr_t addend; |
Richard Henderson | 68fea03 | 2018-06-29 13:07:08 -0700 | [diff] [blame] | 1117 | CPUTLBEntry *te, tn; |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1118 | hwaddr iotlb, xlat, sz, paddr_page; |
| 1119 | target_ulong vaddr_page; |
Peter Maydell | d7898cd | 2016-01-21 14:15:05 +0000 | [diff] [blame] | 1120 | int asidx = cpu_asidx_from_attrs(cpu, attrs); |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1121 | int wp_flags; |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1122 | bool is_ram, is_romd; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1123 | |
Alex Bennée | f0aff0f | 2017-02-23 18:29:16 +0000 | [diff] [blame] | 1124 | assert_cpu_is_self(cpu); |
Paolo Bonzini | 149f54b | 2013-05-24 12:59:37 +0200 | [diff] [blame] | 1125 | |
Richard Henderson | 1308e02 | 2018-10-17 11:48:40 -0700 | [diff] [blame] | 1126 | if (size <= TARGET_PAGE_SIZE) { |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1127 | sz = TARGET_PAGE_SIZE; |
| 1128 | } else { |
Richard Henderson | 1308e02 | 2018-10-17 11:48:40 -0700 | [diff] [blame] | 1129 | tlb_add_large_page(env, mmu_idx, vaddr, size); |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1130 | sz = size; |
| 1131 | } |
| 1132 | vaddr_page = vaddr & TARGET_PAGE_MASK; |
| 1133 | paddr_page = paddr & TARGET_PAGE_MASK; |
| 1134 | |
| 1135 | section = address_space_translate_for_iotlb(cpu, asidx, paddr_page, |
| 1136 | &xlat, &sz, attrs, &prot); |
Paolo Bonzini | 149f54b | 2013-05-24 12:59:37 +0200 | [diff] [blame] | 1137 | assert(sz >= TARGET_PAGE_SIZE); |
| 1138 | |
Alex Bennée | 8526e1f | 2016-03-15 14:30:24 +0000 | [diff] [blame] | 1139 | tlb_debug("vaddr=" TARGET_FMT_lx " paddr=0x" TARGET_FMT_plx |
| 1140 | " prot=%x idx=%d\n", |
| 1141 | vaddr, paddr, prot, mmu_idx); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1142 | |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1143 | address = vaddr_page; |
| 1144 | if (size < TARGET_PAGE_SIZE) { |
Richard Henderson | 30d7e09 | 2019-08-23 15:12:32 -0700 | [diff] [blame] | 1145 | /* Repeat the MMU check and TLB fill on every access. */ |
| 1146 | address |= TLB_INVALID_MASK; |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1147 | } |
Tony Nguyen | a26fc6f | 2019-08-24 04:36:56 +1000 | [diff] [blame] | 1148 | if (attrs.byte_swap) { |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 1149 | address |= TLB_BSWAP; |
Tony Nguyen | a26fc6f | 2019-08-24 04:36:56 +1000 | [diff] [blame] | 1150 | } |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1151 | |
| 1152 | is_ram = memory_region_is_ram(section->mr); |
| 1153 | is_romd = memory_region_is_romd(section->mr); |
| 1154 | |
| 1155 | if (is_ram || is_romd) { |
| 1156 | /* RAM and ROMD both have associated host memory. */ |
Paolo Bonzini | 149f54b | 2013-05-24 12:59:37 +0200 | [diff] [blame] | 1157 | addend = (uintptr_t)memory_region_get_ram_ptr(section->mr) + xlat; |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1158 | } else { |
| 1159 | /* I/O does not; force the host address to NULL. */ |
| 1160 | addend = 0; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1161 | } |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1162 | |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1163 | write_address = address; |
| 1164 | if (is_ram) { |
| 1165 | iotlb = memory_region_get_ram_addr(section->mr) + xlat; |
| 1166 | /* |
| 1167 | * Computing is_clean is expensive; avoid all that unless |
| 1168 | * the page is actually writable. |
| 1169 | */ |
| 1170 | if (prot & PAGE_WRITE) { |
| 1171 | if (section->readonly) { |
| 1172 | write_address |= TLB_DISCARD_WRITE; |
| 1173 | } else if (cpu_physical_memory_is_clean(iotlb)) { |
| 1174 | write_address |= TLB_NOTDIRTY; |
| 1175 | } |
| 1176 | } |
| 1177 | } else { |
| 1178 | /* I/O or ROMD */ |
| 1179 | iotlb = memory_region_section_get_iotlb(cpu, section) + xlat; |
| 1180 | /* |
| 1181 | * Writes to romd devices must go through MMIO to enable write. |
| 1182 | * Reads to romd devices go through the ram_ptr found above, |
| 1183 | * but of course reads to I/O must go through MMIO. |
| 1184 | */ |
| 1185 | write_address |= TLB_MMIO; |
| 1186 | if (!is_romd) { |
| 1187 | address = write_address; |
| 1188 | } |
| 1189 | } |
| 1190 | |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1191 | wp_flags = cpu_watchpoint_address_matches(cpu, vaddr_page, |
| 1192 | TARGET_PAGE_SIZE); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1193 | |
Richard Henderson | 383beda | 2018-10-09 13:51:25 -0400 | [diff] [blame] | 1194 | index = tlb_index(env, mmu_idx, vaddr_page); |
| 1195 | te = tlb_entry(env, mmu_idx, vaddr_page); |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1196 | |
Richard Henderson | 68fea03 | 2018-06-29 13:07:08 -0700 | [diff] [blame] | 1197 | /* |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1198 | * Hold the TLB lock for the rest of the function. We could acquire/release |
| 1199 | * the lock several times in the function, but it is faster to amortize the |
| 1200 | * acquisition cost by acquiring it just once. Note that this leads to |
| 1201 | * a longer critical section, but this is not a concern since the TLB lock |
| 1202 | * is unlikely to be contended. |
| 1203 | */ |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1204 | qemu_spin_lock(&tlb->c.lock); |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1205 | |
Richard Henderson | 3d1523c | 2018-10-20 12:04:57 -0700 | [diff] [blame] | 1206 | /* Note that the tlb is no longer clean. */ |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1207 | tlb->c.dirty |= 1 << mmu_idx; |
Richard Henderson | 3d1523c | 2018-10-20 12:04:57 -0700 | [diff] [blame] | 1208 | |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1209 | /* Make sure there's no cached translation for the new page. */ |
| 1210 | tlb_flush_vtlb_page_locked(env, mmu_idx, vaddr_page); |
| 1211 | |
| 1212 | /* |
Richard Henderson | 68fea03 | 2018-06-29 13:07:08 -0700 | [diff] [blame] | 1213 | * Only evict the old entry to the victim tlb if it's for a |
| 1214 | * different page; otherwise just overwrite the stale data. |
| 1215 | */ |
Emilio G. Cota | 3cea94b | 2019-01-16 12:01:12 -0500 | [diff] [blame] | 1216 | if (!tlb_hit_page_anyprot(te, vaddr_page) && !tlb_entry_is_empty(te)) { |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1217 | unsigned vidx = desc->vindex++ % CPU_VTLB_SIZE; |
| 1218 | CPUTLBEntry *tv = &desc->vtable[vidx]; |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1219 | |
Richard Henderson | 68fea03 | 2018-06-29 13:07:08 -0700 | [diff] [blame] | 1220 | /* Evict the old entry into the victim tlb. */ |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1221 | copy_tlb_helper_locked(tv, te); |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1222 | desc->viotlb[vidx] = desc->iotlb[index]; |
Emilio G. Cota | 86e1eff | 2019-01-16 12:01:13 -0500 | [diff] [blame] | 1223 | tlb_n_used_entries_dec(env, mmu_idx); |
Richard Henderson | 68fea03 | 2018-06-29 13:07:08 -0700 | [diff] [blame] | 1224 | } |
Xin Tong | 88e89a5 | 2014-08-04 20:35:23 -0500 | [diff] [blame] | 1225 | |
| 1226 | /* refill the tlb */ |
Peter Maydell | ace4109 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1227 | /* |
| 1228 | * At this point iotlb contains a physical section number in the lower |
| 1229 | * TARGET_PAGE_BITS, and either |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1230 | * + the ram_addr_t of the page base of the target RAM (RAM) |
| 1231 | * + the offset within section->mr of the page base (I/O, ROMD) |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1232 | * We subtract the vaddr_page (which is page aligned and thus won't |
Peter Maydell | ace4109 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1233 | * disturb the low bits) to give an offset which can be added to the |
| 1234 | * (non-page-aligned) vaddr of the eventual memory access to get |
| 1235 | * the MemoryRegion offset for the access. Note that the vaddr we |
| 1236 | * subtract here is that of the page base, and not the same as the |
| 1237 | * vaddr we add back in io_readx()/io_writex()/get_page_addr_code(). |
| 1238 | */ |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1239 | desc->iotlb[index].addr = iotlb - vaddr_page; |
| 1240 | desc->iotlb[index].attrs = attrs; |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1241 | |
| 1242 | /* Now calculate the new entry */ |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1243 | tn.addend = addend - vaddr_page; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1244 | if (prot & PAGE_READ) { |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1245 | tn.addr_read = address; |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1246 | if (wp_flags & BP_MEM_READ) { |
| 1247 | tn.addr_read |= TLB_WATCHPOINT; |
| 1248 | } |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1249 | } else { |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1250 | tn.addr_read = -1; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1251 | } |
| 1252 | |
| 1253 | if (prot & PAGE_EXEC) { |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1254 | tn.addr_code = address; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1255 | } else { |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1256 | tn.addr_code = -1; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1257 | } |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1258 | |
| 1259 | tn.addr_write = -1; |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1260 | if (prot & PAGE_WRITE) { |
Richard Henderson | 8f5db64 | 2019-09-19 21:09:58 -0700 | [diff] [blame] | 1261 | tn.addr_write = write_address; |
David Hildenbrand | f52bfb1 | 2017-10-16 22:23:57 +0200 | [diff] [blame] | 1262 | if (prot & PAGE_WRITE_INV) { |
| 1263 | tn.addr_write |= TLB_INVALID_MASK; |
| 1264 | } |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1265 | if (wp_flags & BP_MEM_WRITE) { |
| 1266 | tn.addr_write |= TLB_WATCHPOINT; |
| 1267 | } |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1268 | } |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1269 | |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1270 | copy_tlb_helper_locked(te, &tn); |
Emilio G. Cota | 86e1eff | 2019-01-16 12:01:13 -0500 | [diff] [blame] | 1271 | tlb_n_used_entries_inc(env, mmu_idx); |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1272 | qemu_spin_unlock(&tlb->c.lock); |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 1273 | } |
| 1274 | |
Peter Maydell | fadc1cb | 2015-04-26 16:49:24 +0100 | [diff] [blame] | 1275 | /* Add a new TLB entry, but without specifying the memory |
| 1276 | * transaction attributes to be used. |
| 1277 | */ |
| 1278 | void tlb_set_page(CPUState *cpu, target_ulong vaddr, |
| 1279 | hwaddr paddr, int prot, |
| 1280 | int mmu_idx, target_ulong size) |
| 1281 | { |
| 1282 | tlb_set_page_with_attrs(cpu, vaddr, paddr, MEMTXATTRS_UNSPECIFIED, |
| 1283 | prot, mmu_idx, size); |
| 1284 | } |
| 1285 | |
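As an illustration of how these entry points are typically reached (everything prefixed toy_ below is hypothetical, not taken from any real front end), a target's tlb_fill hook might finish by installing the translated page through the attribute-free wrapper:

/*
 * Hypothetical tlb_fill implementation.  toy_page_table_walk() and
 * toy_raise_mmu_fault() stand in for target-specific code; the latter is
 * assumed not to return (it longjmps back to the cpu loop).
 */
static bool toy_cpu_tlb_fill(CPUState *cs, vaddr addr, int size,
                             MMUAccessType access_type, int mmu_idx,
                             bool probe, uintptr_t retaddr)
{
    hwaddr paddr;
    int prot;

    if (!toy_page_table_walk(cs, addr, access_type, mmu_idx, &paddr, &prot)) {
        if (probe) {
            return false;               /* non-faulting probe: report failure */
        }
        toy_raise_mmu_fault(cs, addr, access_type, mmu_idx, retaddr);
    }
    tlb_set_page(cs, addr & TARGET_PAGE_MASK, paddr & TARGET_PAGE_MASK,
                 prot, mmu_idx, TARGET_PAGE_SIZE);
    return true;
}
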
Alex Bennée | 857baec | 2017-02-23 18:29:17 +0000 | [diff] [blame] | 1286 | static inline ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr) |
| 1287 | { |
| 1288 | ram_addr_t ram_addr; |
| 1289 | |
| 1290 | ram_addr = qemu_ram_addr_from_host(ptr); |
| 1291 | if (ram_addr == RAM_ADDR_INVALID) { |
| 1292 | error_report("Bad ram pointer %p", ptr); |
| 1293 | abort(); |
| 1294 | } |
| 1295 | return ram_addr; |
| 1296 | } |
| 1297 | |
Richard Henderson | c319dc1 | 2019-04-03 09:07:11 +0700 | [diff] [blame] | 1298 | /* |
| 1299 | * Note: tlb_fill() can trigger a resize of the TLB. This means that all of the |
| 1300 | * caller's prior references to the TLB table (e.g. CPUTLBEntry pointers) must |
| 1301 | * be discarded and looked up again (e.g. via tlb_entry()). |
| 1302 | */ |
| 1303 | static void tlb_fill(CPUState *cpu, target_ulong addr, int size, |
| 1304 | MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) |
| 1305 | { |
| 1306 | CPUClass *cc = CPU_GET_CLASS(cpu); |
| 1307 | bool ok; |
| 1308 | |
| 1309 | /* |
| 1310 | * This is not a probe, so the only valid return is success; failure |
| 1311 | * should result in exception + longjmp to the cpu loop. |
| 1312 | */ |
Claudio Fontana | 7827168 | 2021-02-04 17:39:23 +0100 | [diff] [blame] | 1313 | ok = cc->tcg_ops->tlb_fill(cpu, addr, size, |
| 1314 | access_type, mmu_idx, false, retaddr); |
Richard Henderson | c319dc1 | 2019-04-03 09:07:11 +0700 | [diff] [blame] | 1315 | assert(ok); |
| 1316 | } |
| 1317 | |
Claudio Fontana | 7827168 | 2021-02-04 17:39:23 +0100 | [diff] [blame] | 1318 | static inline void cpu_unaligned_access(CPUState *cpu, vaddr addr, |
| 1319 | MMUAccessType access_type, |
| 1320 | int mmu_idx, uintptr_t retaddr) |
| 1321 | { |
| 1322 | CPUClass *cc = CPU_GET_CLASS(cpu); |
| 1323 | |
| 1324 | cc->tcg_ops->do_unaligned_access(cpu, addr, access_type, mmu_idx, retaddr); |
| 1325 | } |
| 1326 | |
| 1327 | static inline void cpu_transaction_failed(CPUState *cpu, hwaddr physaddr, |
| 1328 | vaddr addr, unsigned size, |
| 1329 | MMUAccessType access_type, |
| 1330 | int mmu_idx, MemTxAttrs attrs, |
| 1331 | MemTxResult response, |
| 1332 | uintptr_t retaddr) |
| 1333 | { |
| 1334 | CPUClass *cc = CPU_GET_CLASS(cpu); |
| 1335 | |
| 1336 | if (!cpu->ignore_memory_transaction_failures && |
| 1337 | cc->tcg_ops->do_transaction_failed) { |
| 1338 | cc->tcg_ops->do_transaction_failed(cpu, physaddr, addr, size, |
| 1339 | access_type, mmu_idx, attrs, |
| 1340 | response, retaddr); |
| 1341 | } |
| 1342 | } |
| 1343 | |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1344 | static uint64_t io_readx(CPUArchState *env, CPUIOTLBEntry *iotlbentry, |
Richard Henderson | f1be369 | 2019-04-25 14:16:34 -0700 | [diff] [blame] | 1345 | int mmu_idx, target_ulong addr, uintptr_t retaddr, |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 1346 | MMUAccessType access_type, MemOp op) |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1347 | { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1348 | CPUState *cpu = env_cpu(env); |
Peter Maydell | 2d54f19 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1349 | hwaddr mr_offset; |
| 1350 | MemoryRegionSection *section; |
| 1351 | MemoryRegion *mr; |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1352 | uint64_t val; |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 1353 | bool locked = false; |
Peter Maydell | 04e3aab | 2017-09-04 15:21:55 +0100 | [diff] [blame] | 1354 | MemTxResult r; |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1355 | |
Peter Maydell | 2d54f19 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1356 | section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); |
| 1357 | mr = section->mr; |
| 1358 | mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1359 | cpu->mem_io_pc = retaddr; |
Richard Henderson | 0856555 | 2019-09-18 09:15:44 -0700 | [diff] [blame] | 1360 | if (!cpu->can_do_io) { |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1361 | cpu_io_recompile(cpu, retaddr); |
| 1362 | } |
| 1363 | |
Philippe Mathieu-Daudé | 4174495 | 2020-08-06 17:07:26 +0200 | [diff] [blame] | 1364 | if (!qemu_mutex_iothread_locked()) { |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 1365 | qemu_mutex_lock_iothread(); |
| 1366 | locked = true; |
| 1367 | } |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 1368 | r = memory_region_dispatch_read(mr, mr_offset, &val, op, iotlbentry->attrs); |
Peter Maydell | 04e3aab | 2017-09-04 15:21:55 +0100 | [diff] [blame] | 1369 | if (r != MEMTX_OK) { |
Peter Maydell | 2d54f19 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1370 | hwaddr physaddr = mr_offset + |
| 1371 | section->offset_within_address_space - |
| 1372 | section->offset_within_region; |
| 1373 | |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 1374 | cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), access_type, |
Peter Maydell | 04e3aab | 2017-09-04 15:21:55 +0100 | [diff] [blame] | 1375 | mmu_idx, iotlbentry->attrs, r, retaddr); |
| 1376 | } |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 1377 | if (locked) { |
| 1378 | qemu_mutex_unlock_iothread(); |
| 1379 | } |
| 1380 | |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1381 | return val; |
| 1382 | } |
| 1383 | |
Alex Bennée | 2f3a57e | 2020-07-13 21:04:10 +0100 | [diff] [blame] | 1384 | /* |
| 1385 | * Save a potentially trashed IOTLB entry for later lookup by plugin. |
Alex Bennée | 570ef30 | 2020-07-20 13:23:58 +0100 | [diff] [blame] | 1386 | * This is read by tlb_plugin_lookup if the iotlb entry doesn't match |
| 1387 | * because of the side effect of io_writex changing memory layout. |
Alex Bennée | 2f3a57e | 2020-07-13 21:04:10 +0100 | [diff] [blame] | 1388 | */ |
| 1389 | static void save_iotlb_data(CPUState *cs, hwaddr addr, |
| 1390 | MemoryRegionSection *section, hwaddr mr_offset) |
| 1391 | { |
| 1392 | #ifdef CONFIG_PLUGIN |
| 1393 | SavedIOTLB *saved = &cs->saved_iotlb; |
| 1394 | saved->addr = addr; |
| 1395 | saved->section = section; |
| 1396 | saved->mr_offset = mr_offset; |
| 1397 | #endif |
| 1398 | } |
| 1399 | |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1400 | static void io_writex(CPUArchState *env, CPUIOTLBEntry *iotlbentry, |
Richard Henderson | f1be369 | 2019-04-25 14:16:34 -0700 | [diff] [blame] | 1401 | int mmu_idx, uint64_t val, target_ulong addr, |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 1402 | uintptr_t retaddr, MemOp op) |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1403 | { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1404 | CPUState *cpu = env_cpu(env); |
Peter Maydell | 2d54f19 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1405 | hwaddr mr_offset; |
| 1406 | MemoryRegionSection *section; |
| 1407 | MemoryRegion *mr; |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 1408 | bool locked = false; |
Peter Maydell | 04e3aab | 2017-09-04 15:21:55 +0100 | [diff] [blame] | 1409 | MemTxResult r; |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1410 | |
Peter Maydell | 2d54f19 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1411 | section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); |
| 1412 | mr = section->mr; |
| 1413 | mr_offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; |
Richard Henderson | 0856555 | 2019-09-18 09:15:44 -0700 | [diff] [blame] | 1414 | if (!cpu->can_do_io) { |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1415 | cpu_io_recompile(cpu, retaddr); |
| 1416 | } |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1417 | cpu->mem_io_pc = retaddr; |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 1418 | |
Alex Bennée | 2f3a57e | 2020-07-13 21:04:10 +0100 | [diff] [blame] | 1419 | /* |
| 1420 | * The memory_region_dispatch may trigger a flush/resize |
| 1421 | * so for plugins we save the iotlb_data just in case. |
| 1422 | */ |
| 1423 | save_iotlb_data(cpu, iotlbentry->addr, section, mr_offset); |
| 1424 | |
Philippe Mathieu-Daudé | 4174495 | 2020-08-06 17:07:26 +0200 | [diff] [blame] | 1425 | if (!qemu_mutex_iothread_locked()) { |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 1426 | qemu_mutex_lock_iothread(); |
| 1427 | locked = true; |
| 1428 | } |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 1429 | r = memory_region_dispatch_write(mr, mr_offset, val, op, iotlbentry->attrs); |
Peter Maydell | 04e3aab | 2017-09-04 15:21:55 +0100 | [diff] [blame] | 1430 | if (r != MEMTX_OK) { |
Peter Maydell | 2d54f19 | 2018-06-15 14:57:14 +0100 | [diff] [blame] | 1431 | hwaddr physaddr = mr_offset + |
| 1432 | section->offset_within_address_space - |
| 1433 | section->offset_within_region; |
| 1434 | |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 1435 | cpu_transaction_failed(cpu, physaddr, addr, memop_size(op), |
| 1436 | MMU_DATA_STORE, mmu_idx, iotlbentry->attrs, r, |
| 1437 | retaddr); |
Peter Maydell | 04e3aab | 2017-09-04 15:21:55 +0100 | [diff] [blame] | 1438 | } |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 1439 | if (locked) { |
| 1440 | qemu_mutex_unlock_iothread(); |
| 1441 | } |
Richard Henderson | 82a45b9 | 2016-07-08 18:51:28 -0700 | [diff] [blame] | 1442 | } |
| 1443 | |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1444 | static inline target_ulong tlb_read_ofs(CPUTLBEntry *entry, size_t ofs) |
| 1445 | { |
| 1446 | #if TCG_OVERSIZED_GUEST |
| 1447 | return *(target_ulong *)((uintptr_t)entry + ofs); |
| 1448 | #else |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 1449 | /* ofs might correspond to .addr_write, so use qatomic_read */ |
| 1450 | return qatomic_read((target_ulong *)((uintptr_t)entry + ofs)); |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1451 | #endif |
| 1452 | } |
| 1453 | |
Richard Henderson | 7e9a7c5 | 2016-07-08 12:19:32 -0700 | [diff] [blame] | 1454 | /* Return true if ADDR is present in the victim tlb, and has been copied |
| 1455 | back to the main tlb. */ |
| 1456 | static bool victim_tlb_hit(CPUArchState *env, size_t mmu_idx, size_t index, |
| 1457 | size_t elt_ofs, target_ulong page) |
| 1458 | { |
| 1459 | size_t vidx; |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1460 | |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1461 | assert_cpu_is_self(env_cpu(env)); |
Richard Henderson | 7e9a7c5 | 2016-07-08 12:19:32 -0700 | [diff] [blame] | 1462 | for (vidx = 0; vidx < CPU_VTLB_SIZE; ++vidx) { |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1463 | CPUTLBEntry *vtlb = &env_tlb(env)->d[mmu_idx].vtable[vidx]; |
| 1464 | target_ulong cmp; |
| 1465 | |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 1466 | /* elt_ofs might correspond to .addr_write, so use qatomic_read */ |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1467 | #if TCG_OVERSIZED_GUEST |
| 1468 | cmp = *(target_ulong *)((uintptr_t)vtlb + elt_ofs); |
| 1469 | #else |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 1470 | cmp = qatomic_read((target_ulong *)((uintptr_t)vtlb + elt_ofs)); |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1471 | #endif |
Richard Henderson | 7e9a7c5 | 2016-07-08 12:19:32 -0700 | [diff] [blame] | 1472 | |
| 1473 | if (cmp == page) { |
| 1474 | /* Found entry in victim tlb, swap tlb and iotlb. */ |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1475 | CPUTLBEntry tmptlb, *tlb = &env_tlb(env)->f[mmu_idx].table[index]; |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1476 | |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1477 | qemu_spin_lock(&env_tlb(env)->c.lock); |
Emilio G. Cota | 71aec35 | 2018-10-09 13:45:56 -0400 | [diff] [blame] | 1478 | copy_tlb_helper_locked(&tmptlb, tlb); |
| 1479 | copy_tlb_helper_locked(tlb, vtlb); |
| 1480 | copy_tlb_helper_locked(vtlb, &tmptlb); |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1481 | qemu_spin_unlock(&env_tlb(env)->c.lock); |
Alex Bennée | b0706b7 | 2017-02-23 18:29:21 +0000 | [diff] [blame] | 1482 | |
Richard Henderson | a40ec84 | 2019-03-22 13:52:09 -0700 | [diff] [blame] | 1483 | CPUIOTLBEntry tmpio, *io = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
| 1484 | CPUIOTLBEntry *vio = &env_tlb(env)->d[mmu_idx].viotlb[vidx]; |
Richard Henderson | 7e9a7c5 | 2016-07-08 12:19:32 -0700 | [diff] [blame] | 1485 | tmpio = *io; *io = *vio; *vio = tmpio; |
| 1486 | return true; |
| 1487 | } |
| 1488 | } |
| 1489 | return false; |
| 1490 | } |
| 1491 | |
| 1492 | /* Macro to call the above, with local variables from the use context. */ |
Samuel Damashek | a390284 | 2016-07-06 14:26:52 -0400 | [diff] [blame] | 1493 | #define VICTIM_TLB_HIT(TY, ADDR) \ |
Richard Henderson | 7e9a7c5 | 2016-07-08 12:19:32 -0700 | [diff] [blame] | 1494 | victim_tlb_hit(env, mmu_idx, index, offsetof(CPUTLBEntry, TY), \ |
Samuel Damashek | a390284 | 2016-07-06 14:26:52 -0400 | [diff] [blame] | 1495 | (ADDR) & TARGET_PAGE_MASK) |
Richard Henderson | 7e9a7c5 | 2016-07-08 12:19:32 -0700 | [diff] [blame] | 1496 | |
Richard Henderson | 30d7e09 | 2019-08-23 15:12:32 -0700 | [diff] [blame] | 1497 | /* |
| 1498 | * Return a ram_addr_t for the virtual address for execution. |
| 1499 | * |
| 1500 | * Return -1 if we can't translate and execute from an entire page |
| 1501 | * of RAM. This will force us to execute by loading and translating |
| 1502 | * one insn at a time, without caching. |
| 1503 | * |
| 1504 | * NOTE: This function will trigger an exception if the page is |
| 1505 | * not executable. |
KONRAD Frederic | f2553f0 | 2017-02-03 16:29:50 +0100 | [diff] [blame] | 1506 | */ |
Emilio G. Cota | 4b2190d | 2018-11-03 17:40:22 -0400 | [diff] [blame] | 1507 | tb_page_addr_t get_page_addr_code_hostp(CPUArchState *env, target_ulong addr, |
| 1508 | void **hostp) |
KONRAD Frederic | f2553f0 | 2017-02-03 16:29:50 +0100 | [diff] [blame] | 1509 | { |
Richard Henderson | 383beda | 2018-10-09 13:51:25 -0400 | [diff] [blame] | 1510 | uintptr_t mmu_idx = cpu_mmu_index(env, true); |
| 1511 | uintptr_t index = tlb_index(env, mmu_idx, addr); |
| 1512 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); |
KONRAD Frederic | f2553f0 | 2017-02-03 16:29:50 +0100 | [diff] [blame] | 1513 | void *p; |
KONRAD Frederic | f2553f0 | 2017-02-03 16:29:50 +0100 | [diff] [blame] | 1514 | |
Richard Henderson | 383beda | 2018-10-09 13:51:25 -0400 | [diff] [blame] | 1515 | if (unlikely(!tlb_hit(entry->addr_code, addr))) { |
Peter Maydell | b493ccf | 2018-07-13 15:16:35 +0100 | [diff] [blame] | 1516 | if (!VICTIM_TLB_HIT(addr_code, addr)) { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1517 | tlb_fill(env_cpu(env), addr, 0, MMU_INST_FETCH, mmu_idx, 0); |
Emilio G. Cota | 6d967cb | 2019-02-09 11:27:45 -0500 | [diff] [blame] | 1518 | index = tlb_index(env, mmu_idx, addr); |
| 1519 | entry = tlb_entry(env, mmu_idx, addr); |
Richard Henderson | 30d7e09 | 2019-08-23 15:12:32 -0700 | [diff] [blame] | 1520 | |
| 1521 | if (unlikely(entry->addr_code & TLB_INVALID_MASK)) { |
| 1522 | /* |
| 1523 | * The MMU protection covers a smaller range than a target |
| 1524 | * page, so we must redo the MMU check for every insn. |
| 1525 | */ |
| 1526 | return -1; |
| 1527 | } |
KONRAD Frederic | 71b9a45 | 2017-02-03 16:32:12 +0100 | [diff] [blame] | 1528 | } |
Richard Henderson | 383beda | 2018-10-09 13:51:25 -0400 | [diff] [blame] | 1529 | assert(tlb_hit(entry->addr_code, addr)); |
KONRAD Frederic | f2553f0 | 2017-02-03 16:29:50 +0100 | [diff] [blame] | 1530 | } |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1531 | |
Richard Henderson | 30d7e09 | 2019-08-23 15:12:32 -0700 | [diff] [blame] | 1532 | if (unlikely(entry->addr_code & TLB_MMIO)) { |
| 1533 | /* The region is not backed by RAM. */ |
Emilio G. Cota | 4b2190d | 2018-11-03 17:40:22 -0400 | [diff] [blame] | 1534 | if (hostp) { |
| 1535 | *hostp = NULL; |
| 1536 | } |
Peter Maydell | 20cb6ae | 2018-08-14 17:17:19 +0100 | [diff] [blame] | 1537 | return -1; |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1538 | } |
| 1539 | |
Richard Henderson | 383beda | 2018-10-09 13:51:25 -0400 | [diff] [blame] | 1540 | p = (void *)((uintptr_t)addr + entry->addend); |
Emilio G. Cota | 4b2190d | 2018-11-03 17:40:22 -0400 | [diff] [blame] | 1541 | if (hostp) { |
| 1542 | *hostp = p; |
| 1543 | } |
KONRAD Frederic | f2553f0 | 2017-02-03 16:29:50 +0100 | [diff] [blame] | 1544 | return qemu_ram_addr_from_host_nofail(p); |
| 1545 | } |
| 1546 | |
Emilio G. Cota | 4b2190d | 2018-11-03 17:40:22 -0400 | [diff] [blame] | 1547 | tb_page_addr_t get_page_addr_code(CPUArchState *env, target_ulong addr) |
| 1548 | { |
| 1549 | return get_page_addr_code_hostp(env, addr, NULL); |
| 1550 | } |
| 1551 | |
Richard Henderson | 707526a | 2019-09-21 18:47:59 -0700 | [diff] [blame] | 1552 | static void notdirty_write(CPUState *cpu, vaddr mem_vaddr, unsigned size, |
| 1553 | CPUIOTLBEntry *iotlbentry, uintptr_t retaddr) |
| 1554 | { |
| 1555 | ram_addr_t ram_addr = mem_vaddr + iotlbentry->addr; |
| 1556 | |
| 1557 | trace_memory_notdirty_write_access(mem_vaddr, ram_addr, size); |
| 1558 | |
| 1559 | if (!cpu_physical_memory_get_dirty_flag(ram_addr, DIRTY_MEMORY_CODE)) { |
| 1560 | struct page_collection *pages |
| 1561 | = page_collection_lock(ram_addr, ram_addr + size); |
Richard Henderson | 5a7c27b | 2019-09-21 20:16:09 -0700 | [diff] [blame] | 1562 | tb_invalidate_phys_page_fast(pages, ram_addr, size, retaddr); |
Richard Henderson | 707526a | 2019-09-21 18:47:59 -0700 | [diff] [blame] | 1563 | page_collection_unlock(pages); |
| 1564 | } |
| 1565 | |
| 1566 | /* |
| 1567 | * Set both VGA and migration bits for simplicity and to remove |
| 1568 | * the notdirty callback faster. |
| 1569 | */ |
| 1570 | cpu_physical_memory_set_dirty_range(ram_addr, size, DIRTY_CLIENTS_NOCODE); |
| 1571 | |
| 1572 | /* We remove the notdirty callback only if the code has been flushed. */ |
| 1573 | if (!cpu_physical_memory_is_clean(ram_addr)) { |
| 1574 | trace_memory_notdirty_set_dirty(mem_vaddr); |
| 1575 | tlb_set_dirty(cpu, mem_vaddr); |
| 1576 | } |
| 1577 | } |
| 1578 | |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1579 | static int probe_access_internal(CPUArchState *env, target_ulong addr, |
| 1580 | int fault_size, MMUAccessType access_type, |
| 1581 | int mmu_idx, bool nonfault, |
| 1582 | void **phost, uintptr_t retaddr) |
Richard Henderson | 3b08f0a | 2016-07-08 18:22:26 -0700 | [diff] [blame] | 1583 | { |
Richard Henderson | 383beda | 2018-10-09 13:51:25 -0400 | [diff] [blame] | 1584 | uintptr_t index = tlb_index(env, mmu_idx, addr); |
| 1585 | CPUTLBEntry *entry = tlb_entry(env, mmu_idx, addr); |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1586 | target_ulong tlb_addr, page_addr; |
David Hildenbrand | c25c283 | 2019-08-30 12:09:59 +0200 | [diff] [blame] | 1587 | size_t elt_ofs; |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1588 | int flags; |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1589 | |
| 1590 | switch (access_type) { |
| 1591 | case MMU_DATA_LOAD: |
| 1592 | elt_ofs = offsetof(CPUTLBEntry, addr_read); |
| 1593 | break; |
| 1594 | case MMU_DATA_STORE: |
| 1595 | elt_ofs = offsetof(CPUTLBEntry, addr_write); |
| 1596 | break; |
| 1597 | case MMU_INST_FETCH: |
| 1598 | elt_ofs = offsetof(CPUTLBEntry, addr_code); |
| 1599 | break; |
| 1600 | default: |
| 1601 | g_assert_not_reached(); |
| 1602 | } |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1603 | tlb_addr = tlb_read_ofs(entry, elt_ofs); |
| 1604 | |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1605 | page_addr = addr & TARGET_PAGE_MASK; |
| 1606 | if (!tlb_hit_page(tlb_addr, page_addr)) { |
| 1607 | if (!victim_tlb_hit(env, mmu_idx, index, elt_ofs, page_addr)) { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1608 | CPUState *cs = env_cpu(env); |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1609 | CPUClass *cc = CPU_GET_CLASS(cs); |
| 1610 | |
Claudio Fontana | 7827168 | 2021-02-04 17:39:23 +0100 | [diff] [blame] | 1611 | if (!cc->tcg_ops->tlb_fill(cs, addr, fault_size, access_type, |
| 1612 | mmu_idx, nonfault, retaddr)) { |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1613 | /* Non-faulting page table read failed. */ |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1614 | *phost = NULL; |
| 1615 | return TLB_INVALID_MASK; |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1616 | } |
| 1617 | |
| 1618 | /* TLB resize via tlb_fill may have moved the entry. */ |
| 1619 | entry = tlb_entry(env, mmu_idx, addr); |
| 1620 | } |
| 1621 | tlb_addr = tlb_read_ofs(entry, elt_ofs); |
| 1622 | } |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1623 | flags = tlb_addr & TLB_FLAGS_MASK; |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1624 | |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1625 | /* Fold all "mmio-like" bits into TLB_MMIO. This is not RAM. */ |
| 1626 | if (unlikely(flags & ~(TLB_WATCHPOINT | TLB_NOTDIRTY))) { |
| 1627 | *phost = NULL; |
| 1628 | return TLB_MMIO; |
| 1629 | } |
| 1630 | |
| 1631 | /* Everything else is RAM. */ |
| 1632 | *phost = (void *)((uintptr_t)addr + entry->addend); |
| 1633 | return flags; |
| 1634 | } |
| 1635 | |
| 1636 | int probe_access_flags(CPUArchState *env, target_ulong addr, |
| 1637 | MMUAccessType access_type, int mmu_idx, |
| 1638 | bool nonfault, void **phost, uintptr_t retaddr) |
| 1639 | { |
| 1640 | int flags; |
| 1641 | |
| 1642 | flags = probe_access_internal(env, addr, 0, access_type, mmu_idx, |
| 1643 | nonfault, phost, retaddr); |
| 1644 | |
| 1645 | /* Handle clean RAM pages. */ |
| 1646 | if (unlikely(flags & TLB_NOTDIRTY)) { |
| 1647 | uintptr_t index = tlb_index(env, mmu_idx, addr); |
| 1648 | CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
| 1649 | |
| 1650 | notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); |
| 1651 | flags &= ~TLB_NOTDIRTY; |
| 1652 | } |
| 1653 | |
| 1654 | return flags; |
| 1655 | } |
| 1656 | |
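A usage sketch of the non-faulting interface above (the helper and its policy are assumptions for illustration; only the probe_access_flags call mirrors the signature defined here):

/*
 * Hypothetical helper: try to read one guest byte without raising a
 * guest fault.  Any flag left set by probe_access_flags (failed
 * non-faulting fill, MMIO/bswap page, watchpoint) means the direct
 * host dereference must not be used, so report failure instead.
 */
static bool toy_try_read_byte(CPUArchState *env, target_ulong addr,
                              uint8_t *out, uintptr_t ra)
{
    void *host;
    int flags = probe_access_flags(env, addr, MMU_DATA_LOAD,
                                   cpu_mmu_index(env, false),
                                   true, &host, ra);

    if (flags) {
        return false;
    }
    *out = *(uint8_t *)host;    /* plain RAM: direct host pointer is valid */
    return true;
}
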
| 1657 | void *probe_access(CPUArchState *env, target_ulong addr, int size, |
| 1658 | MMUAccessType access_type, int mmu_idx, uintptr_t retaddr) |
| 1659 | { |
| 1660 | void *host; |
| 1661 | int flags; |
| 1662 | |
| 1663 | g_assert(-(addr | TARGET_PAGE_MASK) >= size); |
| 1664 | |
| 1665 | flags = probe_access_internal(env, addr, size, access_type, mmu_idx, |
| 1666 | false, &host, retaddr); |
| 1667 | |
| 1668 | /* Per the interface, size == 0 merely faults the access. */ |
| 1669 | if (size == 0) { |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1670 | return NULL; |
| 1671 | } |
| 1672 | |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1673 | if (unlikely(flags & (TLB_NOTDIRTY | TLB_WATCHPOINT))) { |
| 1674 | uintptr_t index = tlb_index(env, mmu_idx, addr); |
| 1675 | CPUIOTLBEntry *iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
| 1676 | |
| 1677 | /* Handle watchpoints. */ |
| 1678 | if (flags & TLB_WATCHPOINT) { |
| 1679 | int wp_access = (access_type == MMU_DATA_STORE |
| 1680 | ? BP_MEM_WRITE : BP_MEM_READ); |
| 1681 | cpu_check_watchpoint(env_cpu(env), addr, size, |
| 1682 | iotlbentry->attrs, wp_access, retaddr); |
| 1683 | } |
| 1684 | |
| 1685 | /* Handle clean RAM pages. */ |
| 1686 | if (flags & TLB_NOTDIRTY) { |
| 1687 | notdirty_write(env_cpu(env), addr, 1, iotlbentry, retaddr); |
| 1688 | } |
| 1689 | } |
| 1690 | |
| 1691 | return host; |
Richard Henderson | 4811e90 | 2019-04-03 10:16:56 +0700 | [diff] [blame] | 1692 | } |
| 1693 | |
Richard Henderson | 069cfe7 | 2020-05-08 08:43:45 -0700 | [diff] [blame] | 1694 | void *tlb_vaddr_to_host(CPUArchState *env, abi_ptr addr, |
| 1695 | MMUAccessType access_type, int mmu_idx) |
| 1696 | { |
| 1697 | void *host; |
| 1698 | int flags; |
| 1699 | |
| 1700 | flags = probe_access_internal(env, addr, 0, access_type, |
| 1701 | mmu_idx, true, &host, 0); |
| 1702 | |
| 1703 | /* The caller expects no flags to be set, so return NULL if any are. */ |
| 1704 | return flags ? NULL : host; |
| 1705 | } |
Alex Bennée | 235537f | 2019-06-19 20:20:08 +0100 | [diff] [blame] | 1706 | |
| 1707 | #ifdef CONFIG_PLUGIN |
| 1708 | /* |
| 1709 | * Perform a TLB lookup and populate the qemu_plugin_hwaddr structure. |
| 1710 | * This should be a hot path as we will have just looked this address up |
| 1711 | * in the softmmu lookup code (or helper). We don't handle re-fills or |
| 1712 | * checking the victim table. This is purely informational. |
| 1713 | * |
Alex Bennée | 2f3a57e | 2020-07-13 21:04:10 +0100 | [diff] [blame] | 1714 | * This almost never fails as the memory access being instrumented |
| 1715 | * should have just filled the TLB. The one corner case is io_writex |
| 1716 | * which can cause TLB flushes and potential resizing of the TLBs |
Alex Bennée | 570ef30 | 2020-07-20 13:23:58 +0100 | [diff] [blame] | 1717 | * losing the information we need. In those cases we need to recover |
| 1718 | * data from a copy of the iotlbentry. As long as this always occurs |
| 1719 | * from the same thread (which it will be for a memory callback), this is safe. |
Alex Bennée | 235537f | 2019-06-19 20:20:08 +0100 | [diff] [blame] | 1720 | */ |
| 1721 | |
| 1722 | bool tlb_plugin_lookup(CPUState *cpu, target_ulong addr, int mmu_idx, |
| 1723 | bool is_store, struct qemu_plugin_hwaddr *data) |
| 1724 | { |
| 1725 | CPUArchState *env = cpu->env_ptr; |
| 1726 | CPUTLBEntry *tlbe = tlb_entry(env, mmu_idx, addr); |
| 1727 | uintptr_t index = tlb_index(env, mmu_idx, addr); |
| 1728 | target_ulong tlb_addr = is_store ? tlb_addr_write(tlbe) : tlbe->addr_read; |
| 1729 | |
| 1730 | if (likely(tlb_hit(tlb_addr, addr))) { |
| 1731 | /* We must have an iotlb entry for MMIO */ |
| 1732 | if (tlb_addr & TLB_MMIO) { |
| 1733 | CPUIOTLBEntry *iotlbentry; |
| 1734 | iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
| 1735 | data->is_io = true; |
| 1736 | data->v.io.section = iotlb_to_section(cpu, iotlbentry->addr, iotlbentry->attrs); |
| 1737 | data->v.io.offset = (iotlbentry->addr & TARGET_PAGE_MASK) + addr; |
| 1738 | } else { |
| 1739 | data->is_io = false; |
Alex Bennée | 2d93203 | 2021-07-09 15:29:52 +0100 | [diff] [blame] | 1740 | data->v.ram.hostaddr = (void *)((uintptr_t)addr + tlbe->addend); |
Alex Bennée | 235537f | 2019-06-19 20:20:08 +0100 | [diff] [blame] | 1741 | } |
| 1742 | return true; |
Alex Bennée | 2f3a57e | 2020-07-13 21:04:10 +0100 | [diff] [blame] | 1743 | } else { |
| 1744 | SavedIOTLB *saved = &cpu->saved_iotlb; |
| 1745 | data->is_io = true; |
| 1746 | data->v.io.section = saved->section; |
| 1747 | data->v.io.offset = saved->mr_offset; |
| 1748 | return true; |
Alex Bennée | 235537f | 2019-06-19 20:20:08 +0100 | [diff] [blame] | 1749 | } |
Alex Bennée | 235537f | 2019-06-19 20:20:08 +0100 | [diff] [blame] | 1750 | } |
| 1751 | |
| 1752 | #endif |
| 1753 | |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1754 | /* |
| 1755 | * Probe for an atomic operation. Do not allow unaligned operations |
| 1756 | * or I/O operations to proceed. Return the host address. |
| 1757 | * |
| 1758 | * @prot may be PAGE_READ, PAGE_WRITE, or PAGE_READ|PAGE_WRITE. |
| 1759 | */ |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1760 | static void *atomic_mmu_lookup(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 1761 | MemOpIdx oi, int size, int prot, |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1762 | uintptr_t retaddr) |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1763 | { |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 1764 | uintptr_t mmu_idx = get_mmuidx(oi); |
Tony Nguyen | 14776ab | 2019-08-24 04:10:58 +1000 | [diff] [blame] | 1765 | MemOp mop = get_memop(oi); |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1766 | int a_bits = get_alignment_bits(mop); |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1767 | uintptr_t index; |
| 1768 | CPUTLBEntry *tlbe; |
| 1769 | target_ulong tlb_addr; |
Peter Maydell | 34d4993 | 2017-11-20 18:08:28 +0000 | [diff] [blame] | 1770 | void *hostaddr; |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1771 | |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 1772 | tcg_debug_assert(mmu_idx < NB_MMU_MODES); |
| 1773 | |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1774 | /* Adjust the given return address. */ |
| 1775 | retaddr -= GETPC_ADJ; |
| 1776 | |
| 1777 | /* Enforce guest required alignment. */ |
| 1778 | if (unlikely(a_bits > 0 && (addr & ((1 << a_bits) - 1)))) { |
| 1779 | /* ??? Maybe indicate atomic op to cpu_unaligned_access */ |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1780 | cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1781 | mmu_idx, retaddr); |
| 1782 | } |
| 1783 | |
| 1784 | /* Enforce qemu required alignment. */ |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1785 | if (unlikely(addr & (size - 1))) { |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1786 | /* We get here if guest alignment was not requested, |
| 1787 | or was not enforced by cpu_unaligned_access above. |
| 1788 | We might widen the access and emulate, but for now |
| 1789 | mark an exception and exit the cpu loop. */ |
| 1790 | goto stop_the_world; |
| 1791 | } |
| 1792 | |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1793 | index = tlb_index(env, mmu_idx, addr); |
| 1794 | tlbe = tlb_entry(env, mmu_idx, addr); |
| 1795 | |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1796 | /* Check TLB entry and enforce page permissions. */ |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1797 | if (prot & PAGE_WRITE) { |
| 1798 | tlb_addr = tlb_addr_write(tlbe); |
| 1799 | if (!tlb_hit(tlb_addr, addr)) { |
| 1800 | if (!VICTIM_TLB_HIT(addr_write, addr)) { |
| 1801 | tlb_fill(env_cpu(env), addr, size, |
| 1802 | MMU_DATA_STORE, mmu_idx, retaddr); |
| 1803 | index = tlb_index(env, mmu_idx, addr); |
| 1804 | tlbe = tlb_entry(env, mmu_idx, addr); |
| 1805 | } |
| 1806 | tlb_addr = tlb_addr_write(tlbe) & ~TLB_INVALID_MASK; |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1807 | } |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1808 | |
| 1809 | /* Let the guest notice RMW on a write-only page. */ |
| 1810 | if ((prot & PAGE_READ) && |
| 1811 | unlikely(tlbe->addr_read != (tlb_addr & ~TLB_NOTDIRTY))) { |
| 1812 | tlb_fill(env_cpu(env), addr, size, |
| 1813 | MMU_DATA_LOAD, mmu_idx, retaddr); |
| 1814 | /* |
| 1815 | * Since we don't support reads and writes to different addresses, |
| 1816 | * and we do have the proper page loaded for write, this shouldn't |
| 1817 | * ever return. But just in case, handle via stop-the-world. |
| 1818 | */ |
| 1819 | goto stop_the_world; |
| 1820 | } |
| 1821 | } else /* if (prot & PAGE_READ) */ { |
| 1822 | tlb_addr = tlbe->addr_read; |
| 1823 | if (!tlb_hit(tlb_addr, addr)) { |
| 1824 | if (!VICTIM_TLB_HIT(addr_write, addr)) { |
| 1825 | tlb_fill(env_cpu(env), addr, size, |
| 1826 | MMU_DATA_LOAD, mmu_idx, retaddr); |
| 1827 | index = tlb_index(env, mmu_idx, addr); |
| 1828 | tlbe = tlb_entry(env, mmu_idx, addr); |
| 1829 | } |
| 1830 | tlb_addr = tlbe->addr_read & ~TLB_INVALID_MASK; |
| 1831 | } |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1832 | } |
| 1833 | |
Peter Maydell | 55df6fc | 2018-06-26 17:50:41 +0100 | [diff] [blame] | 1834 |     /* Notice an I/O access. */ |
Richard Henderson | 30d7e09 | 2019-08-23 15:12:32 -0700 | [diff] [blame] | 1835 | if (unlikely(tlb_addr & TLB_MMIO)) { |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1836 | /* There's really nothing that can be done to |
| 1837 | support this apart from stop-the-world. */ |
| 1838 | goto stop_the_world; |
| 1839 | } |
| 1840 | |
Peter Maydell | 34d4993 | 2017-11-20 18:08:28 +0000 | [diff] [blame] | 1841 | hostaddr = (void *)((uintptr_t)addr + tlbe->addend); |
| 1842 | |
Peter Maydell | 34d4993 | 2017-11-20 18:08:28 +0000 | [diff] [blame] | 1843 | if (unlikely(tlb_addr & TLB_NOTDIRTY)) { |
Richard Henderson | 08dff43 | 2021-06-12 17:21:06 -0700 | [diff] [blame] | 1844 | notdirty_write(env_cpu(env), addr, size, |
Richard Henderson | 707526a | 2019-09-21 18:47:59 -0700 | [diff] [blame] | 1845 | &env_tlb(env)->d[mmu_idx].iotlb[index], retaddr); |
Peter Maydell | 34d4993 | 2017-11-20 18:08:28 +0000 | [diff] [blame] | 1846 | } |
| 1847 | |
| 1848 | return hostaddr; |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1849 | |
| 1850 | stop_the_world: |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1851 | cpu_loop_exit_atomic(env_cpu(env), retaddr); |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 1852 | } |
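/*
 * Illustrative sketch only (not code from this file): the helpers that
 * atomic_template.h generates are expected to use this lookup roughly
 * as below for a 4-byte compare-and-swap.  The generated body also
 * includes plugin/trace hooks, so treat this as an outline.
 *
 *     uint32_t cpu_atomic_cmpxchgl_le_mmu(CPUArchState *env,
 *                                         target_ulong addr,
 *                                         uint32_t cmpv, uint32_t newv,
 *                                         MemOpIdx oi, uintptr_t retaddr)
 *     {
 *         uint32_t *haddr = atomic_mmu_lookup(env, addr, oi, 4,
 *                                             PAGE_READ | PAGE_WRITE,
 *                                             retaddr);
 *         uint32_t ret = qatomic_cmpxchg__nocheck(haddr, cmpv, newv);
 *         ATOMIC_MMU_CLEANUP;
 *         return ret;
 *     }
 */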
| 1853 | |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1854 | /* |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 1855 | * Verify that we have passed the correct MemOp to the correct function. |
| 1856 | * |
| 1857 | * In the case of the helper_*_mmu functions, we will have done this by |
| 1858 | * using the MemOp to look up the helper during code generation. |
| 1859 | * |
| 1860 | * In the case of the cpu_*_mmu functions, this is up to the caller. |
| 1861 | * We could present one function to target code, and dispatch based on |
| 1862 | * the MemOp, but so far we have worked hard to avoid an indirect function |
| 1863 | * call along the memory path. |
| 1864 | */ |
| 1865 | static void validate_memop(MemOpIdx oi, MemOp expected) |
| 1866 | { |
| 1867 | #ifdef CONFIG_DEBUG_TCG |
| 1868 | MemOp have = get_memop(oi) & (MO_SIZE | MO_BSWAP); |
| 1869 | assert(have == expected); |
| 1870 | #endif |
| 1871 | } |
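/*
 * For illustration: callers construct the MemOpIdx with make_memop_idx(),
 * combining a MemOp with the mmu index, e.g.
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *
 * Only the size and byte-swap bits are checked here, so flags such as
 * MO_ALIGN do not affect the assertion.
 */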
| 1872 | |
| 1873 | /* |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1874 | * Load Helpers |
| 1875 | * |
| 1876 |  * We support two different access types. The code-read path |
| 1877 |  * (MMU_INST_FETCH) is specifically for reading instructions from |
| 1878 |  * system memory. It is called by the translation loop and by helpers |
| 1879 |  * that disassemble code; it is not used for ordinary guest data accesses. |
| 1880 | */ |
Paolo Bonzini | 0f590e74 | 2014-03-28 17:55:24 +0100 | [diff] [blame] | 1881 | |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 1882 | typedef uint64_t FullLoadHelper(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 1883 | MemOpIdx oi, uintptr_t retaddr); |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 1884 | |
Richard Henderson | c6b716c | 2019-09-10 12:02:36 -0400 | [diff] [blame] | 1885 | static inline uint64_t QEMU_ALWAYS_INLINE |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 1886 | load_memop(const void *haddr, MemOp op) |
| 1887 | { |
| 1888 | switch (op) { |
| 1889 | case MO_UB: |
| 1890 | return ldub_p(haddr); |
| 1891 | case MO_BEUW: |
| 1892 | return lduw_be_p(haddr); |
| 1893 | case MO_LEUW: |
| 1894 | return lduw_le_p(haddr); |
| 1895 | case MO_BEUL: |
| 1896 | return (uint32_t)ldl_be_p(haddr); |
| 1897 | case MO_LEUL: |
| 1898 | return (uint32_t)ldl_le_p(haddr); |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 1899 | case MO_BEUQ: |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 1900 | return ldq_be_p(haddr); |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 1901 | case MO_LEUQ: |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 1902 | return ldq_le_p(haddr); |
| 1903 | default: |
| 1904 | qemu_build_not_reached(); |
| 1905 | } |
| 1906 | } |
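/*
 * load_memop() is only useful when op is a compile-time constant: the
 * switch then folds to a single host load, and qemu_build_not_reached()
 * turns any path the compiler cannot eliminate into a build-time error
 * rather than a runtime one.
 */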
| 1907 | |
| 1908 | static inline uint64_t QEMU_ALWAYS_INLINE |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 1909 | load_helper(CPUArchState *env, target_ulong addr, MemOpIdx oi, |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 1910 | uintptr_t retaddr, MemOp op, bool code_read, |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 1911 | FullLoadHelper *full_load) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1912 | { |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1913 | const size_t tlb_off = code_read ? |
| 1914 | offsetof(CPUTLBEntry, addr_code) : offsetof(CPUTLBEntry, addr_read); |
Richard Henderson | f1be369 | 2019-04-25 14:16:34 -0700 | [diff] [blame] | 1915 | const MMUAccessType access_type = |
| 1916 | code_read ? MMU_INST_FETCH : MMU_DATA_LOAD; |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 1917 | const unsigned a_bits = get_alignment_bits(get_memop(oi)); |
| 1918 | const size_t size = memop_size(op); |
| 1919 | uintptr_t mmu_idx = get_mmuidx(oi); |
| 1920 | uintptr_t index; |
| 1921 | CPUTLBEntry *entry; |
| 1922 | target_ulong tlb_addr; |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1923 | void *haddr; |
| 1924 | uint64_t res; |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 1925 | |
| 1926 | tcg_debug_assert(mmu_idx < NB_MMU_MODES); |
Paolo Bonzini | 0f590e74 | 2014-03-28 17:55:24 +0100 | [diff] [blame] | 1927 | |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1928 | /* Handle CPU specific unaligned behaviour */ |
| 1929 | if (addr & ((1 << a_bits) - 1)) { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1930 | cpu_unaligned_access(env_cpu(env), addr, access_type, |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1931 | mmu_idx, retaddr); |
| 1932 | } |
| 1933 | |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 1934 | index = tlb_index(env, mmu_idx, addr); |
| 1935 | entry = tlb_entry(env, mmu_idx, addr); |
| 1936 | tlb_addr = code_read ? entry->addr_code : entry->addr_read; |
| 1937 | |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1938 | /* If the TLB entry is for a different page, reload and try again. */ |
| 1939 | if (!tlb_hit(tlb_addr, addr)) { |
| 1940 | if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, |
| 1941 | addr & TARGET_PAGE_MASK)) { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 1942 | tlb_fill(env_cpu(env), addr, size, |
Richard Henderson | f1be369 | 2019-04-25 14:16:34 -0700 | [diff] [blame] | 1943 | access_type, mmu_idx, retaddr); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1944 | index = tlb_index(env, mmu_idx, addr); |
| 1945 | entry = tlb_entry(env, mmu_idx, addr); |
| 1946 | } |
| 1947 | tlb_addr = code_read ? entry->addr_code : entry->addr_read; |
Richard Henderson | 30d7e09 | 2019-08-23 15:12:32 -0700 | [diff] [blame] | 1948 | tlb_addr &= ~TLB_INVALID_MASK; |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1949 | } |
| 1950 | |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1951 | /* Handle anything that isn't just a straight memory access. */ |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1952 | if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1953 | CPUIOTLBEntry *iotlbentry; |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 1954 | bool need_swap; |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1955 | |
| 1956 | /* For anything that is unaligned, recurse through full_load. */ |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1957 | if ((addr & (size - 1)) != 0) { |
| 1958 | goto do_unaligned_access; |
| 1959 | } |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1960 | |
| 1961 | iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
| 1962 | |
| 1963 | /* Handle watchpoints. */ |
| 1964 | if (unlikely(tlb_addr & TLB_WATCHPOINT)) { |
| 1965 | /* On watchpoint hit, this will longjmp out. */ |
| 1966 | cpu_check_watchpoint(env_cpu(env), addr, size, |
| 1967 | iotlbentry->attrs, BP_MEM_READ, retaddr); |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1968 | } |
| 1969 | |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 1970 | need_swap = size > 1 && (tlb_addr & TLB_BSWAP); |
| 1971 | |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 1972 | /* Handle I/O access. */ |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 1973 | if (likely(tlb_addr & TLB_MMIO)) { |
| 1974 | return io_readx(env, iotlbentry, mmu_idx, addr, retaddr, |
| 1975 | access_type, op ^ (need_swap * MO_BSWAP)); |
| 1976 | } |
| 1977 | |
| 1978 | haddr = (void *)((uintptr_t)addr + entry->addend); |
| 1979 | |
| 1980 | /* |
| 1981 | * Keep these two load_memop separate to ensure that the compiler |
| 1982 | * is able to fold the entire function to a single instruction. |
| 1983 | * There is a build-time assert inside to remind you of this. ;-) |
| 1984 | */ |
| 1985 | if (unlikely(need_swap)) { |
| 1986 | return load_memop(haddr, op ^ MO_BSWAP); |
| 1987 | } |
| 1988 | return load_memop(haddr, op); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1989 | } |
| 1990 | |
| 1991 | /* Handle slow unaligned access (it spans two pages or IO). */ |
| 1992 | if (size > 1 |
| 1993 | && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 |
| 1994 | >= TARGET_PAGE_SIZE)) { |
| 1995 | target_ulong addr1, addr2; |
Alex Bennée | 8c79b28 | 2019-06-03 15:56:32 +0100 | [diff] [blame] | 1996 | uint64_t r1, r2; |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 1997 | unsigned shift; |
| 1998 | do_unaligned_access: |
Alex Bennée | ab7a200 | 2019-06-06 16:38:19 +0100 | [diff] [blame] | 1999 | addr1 = addr & ~((target_ulong)size - 1); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2000 | addr2 = addr1 + size; |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2001 | r1 = full_load(env, addr1, oi, retaddr); |
| 2002 | r2 = full_load(env, addr2, oi, retaddr); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2003 | shift = (addr & (size - 1)) * 8; |
| 2004 | |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 2005 | if (memop_big_endian(op)) { |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2006 | /* Big-endian combine. */ |
| 2007 | res = (r1 << shift) | (r2 >> ((size * 8) - shift)); |
| 2008 | } else { |
| 2009 | /* Little-endian combine. */ |
| 2010 | res = (r1 >> shift) | (r2 << ((size * 8) - shift)); |
| 2011 | } |
| 2012 | return res & MAKE_64BIT_MASK(0, size * 8); |
| 2013 | } |
| 2014 | |
| 2015 | haddr = (void *)((uintptr_t)addr + entry->addend); |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 2016 | return load_memop(haddr, op); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2017 | } |
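/*
 * Worked example of the unaligned slow path above: for a little-endian
 * 4-byte load at an address with (addr & 3) == 2, addr1 is the aligned
 * word containing the first two bytes and addr2 the following word, so
 * shift == 16 and
 *
 *     res = (r1 >> 16) | (r2 << 16);
 *
 * i.e. the two high bytes of r1 supply the low half of the result and
 * the two low bytes of r2 the high half, before masking to 32 bits.
 * Each half is re-fetched through full_load with the original oi, so
 * those recursive loads may themselves hit MMIO or watchpoints.
 */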
| 2018 | |
| 2019 | /* |
| 2020 | * For the benefit of TCG generated code, we want to avoid the |
| 2021 | * complication of ABI-specific return type promotion and always |
| 2022 | * return a value extended to the register size of the host. This is |
| 2023 | * tcg_target_long, except in the case of a 32-bit host and 64-bit |
| 2024 | * data, and for that we always have uint64_t. |
| 2025 | * |
| 2026 | * We don't bother with this widened value for SOFTMMU_CODE_ACCESS. |
| 2027 | */ |
| 2028 | |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2029 | static uint64_t full_ldub_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2030 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2031 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2032 | validate_memop(oi, MO_UB); |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 2033 | return load_helper(env, addr, oi, retaddr, MO_UB, false, full_ldub_mmu); |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2034 | } |
| 2035 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2036 | tcg_target_ulong helper_ret_ldub_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2037 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2038 | { |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2039 | return full_ldub_mmu(env, addr, oi, retaddr); |
| 2040 | } |
| 2041 | |
| 2042 | static uint64_t full_le_lduw_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2043 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2044 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2045 | validate_memop(oi, MO_LEUW); |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 2046 | return load_helper(env, addr, oi, retaddr, MO_LEUW, false, |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2047 | full_le_lduw_mmu); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2048 | } |
| 2049 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2050 | tcg_target_ulong helper_le_lduw_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2051 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2052 | { |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2053 | return full_le_lduw_mmu(env, addr, oi, retaddr); |
| 2054 | } |
| 2055 | |
| 2056 | static uint64_t full_be_lduw_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2057 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2058 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2059 | validate_memop(oi, MO_BEUW); |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 2060 | return load_helper(env, addr, oi, retaddr, MO_BEUW, false, |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2061 | full_be_lduw_mmu); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2062 | } |
| 2063 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2064 | tcg_target_ulong helper_be_lduw_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2065 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2066 | { |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2067 | return full_be_lduw_mmu(env, addr, oi, retaddr); |
| 2068 | } |
| 2069 | |
| 2070 | static uint64_t full_le_ldul_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2071 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2072 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2073 | validate_memop(oi, MO_LEUL); |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 2074 | return load_helper(env, addr, oi, retaddr, MO_LEUL, false, |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2075 | full_le_ldul_mmu); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2076 | } |
| 2077 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2078 | tcg_target_ulong helper_le_ldul_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2079 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2080 | { |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2081 | return full_le_ldul_mmu(env, addr, oi, retaddr); |
| 2082 | } |
| 2083 | |
| 2084 | static uint64_t full_be_ldul_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2085 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2086 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2087 | validate_memop(oi, MO_BEUL); |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 2088 | return load_helper(env, addr, oi, retaddr, MO_BEUL, false, |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2089 | full_be_ldul_mmu); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2090 | } |
| 2091 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2092 | tcg_target_ulong helper_be_ldul_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2093 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2094 | { |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2095 | return full_be_ldul_mmu(env, addr, oi, retaddr); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2096 | } |
| 2097 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2098 | uint64_t helper_le_ldq_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2099 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2100 | { |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2101 | validate_memop(oi, MO_LEUQ); |
| 2102 | return load_helper(env, addr, oi, retaddr, MO_LEUQ, false, |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2103 | helper_le_ldq_mmu); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2104 | } |
| 2105 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2106 | uint64_t helper_be_ldq_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2107 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2108 | { |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2109 | validate_memop(oi, MO_BEUQ); |
| 2110 | return load_helper(env, addr, oi, retaddr, MO_BEUQ, false, |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2111 | helper_be_ldq_mmu); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2112 | } |
| 2113 | |
| 2114 | /* |
| 2115 | * Provide signed versions of the load routines as well. We can of course |
| 2116 |  * avoid this for 64-bit data, or for 32-bit data on a 32-bit host. |
| 2117 | */ |
| 2118 | |
| 2119 | |
| 2120 | tcg_target_ulong helper_ret_ldsb_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2121 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2122 | { |
| 2123 | return (int8_t)helper_ret_ldub_mmu(env, addr, oi, retaddr); |
| 2124 | } |
| 2125 | |
| 2126 | tcg_target_ulong helper_le_ldsw_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2127 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2128 | { |
| 2129 | return (int16_t)helper_le_lduw_mmu(env, addr, oi, retaddr); |
| 2130 | } |
| 2131 | |
| 2132 | tcg_target_ulong helper_be_ldsw_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2133 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2134 | { |
| 2135 | return (int16_t)helper_be_lduw_mmu(env, addr, oi, retaddr); |
| 2136 | } |
| 2137 | |
| 2138 | tcg_target_ulong helper_le_ldsl_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2139 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2140 | { |
| 2141 | return (int32_t)helper_le_ldul_mmu(env, addr, oi, retaddr); |
| 2142 | } |
| 2143 | |
| 2144 | tcg_target_ulong helper_be_ldsl_mmu(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2145 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2146 | { |
| 2147 | return (int32_t)helper_be_ldul_mmu(env, addr, oi, retaddr); |
| 2148 | } |
| 2149 | |
| 2150 | /* |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2151 | * Load helpers for cpu_ldst.h. |
| 2152 | */ |
| 2153 | |
| 2154 | static inline uint64_t cpu_load_helper(CPUArchState *env, abi_ptr addr, |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2155 | MemOpIdx oi, uintptr_t retaddr, |
| 2156 | FullLoadHelper *full_load) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2157 | { |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2158 | uint64_t ret; |
| 2159 | |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2160 | ret = full_load(env, addr, oi, retaddr); |
Richard Henderson | 37aff08 | 2021-07-26 11:48:30 -1000 | [diff] [blame] | 2161 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_R); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2162 | return ret; |
| 2163 | } |
| 2164 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2165 | uint8_t cpu_ldb_mmu(CPUArchState *env, abi_ptr addr, MemOpIdx oi, uintptr_t ra) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2166 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2167 | return cpu_load_helper(env, addr, oi, ra, full_ldub_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2168 | } |
| 2169 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2170 | uint16_t cpu_ldw_be_mmu(CPUArchState *env, abi_ptr addr, |
| 2171 | MemOpIdx oi, uintptr_t ra) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2172 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2173 | return cpu_load_helper(env, addr, oi, ra, full_be_lduw_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2174 | } |
| 2175 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2176 | uint32_t cpu_ldl_be_mmu(CPUArchState *env, abi_ptr addr, |
| 2177 | MemOpIdx oi, uintptr_t ra) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2178 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2179 | return cpu_load_helper(env, addr, oi, ra, full_be_ldul_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2180 | } |
| 2181 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2182 | uint64_t cpu_ldq_be_mmu(CPUArchState *env, abi_ptr addr, |
| 2183 | MemOpIdx oi, uintptr_t ra) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2184 | { |
Richard Henderson | 46697cb | 2022-03-14 17:25:06 -0700 | [diff] [blame] | 2185 | return cpu_load_helper(env, addr, oi, ra, helper_be_ldq_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2186 | } |
| 2187 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2188 | uint16_t cpu_ldw_le_mmu(CPUArchState *env, abi_ptr addr, |
| 2189 | MemOpIdx oi, uintptr_t ra) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2190 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2191 | return cpu_load_helper(env, addr, oi, ra, full_le_lduw_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2192 | } |
| 2193 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2194 | uint32_t cpu_ldl_le_mmu(CPUArchState *env, abi_ptr addr, |
| 2195 | MemOpIdx oi, uintptr_t ra) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2196 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2197 | return cpu_load_helper(env, addr, oi, ra, full_le_ldul_mmu); |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2198 | } |
| 2199 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2200 | uint64_t cpu_ldq_le_mmu(CPUArchState *env, abi_ptr addr, |
| 2201 | MemOpIdx oi, uintptr_t ra) |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2202 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2203 | return cpu_load_helper(env, addr, oi, ra, helper_le_ldq_mmu); |
Richard Henderson | cfe04a4 | 2019-12-11 10:33:26 -0800 | [diff] [blame] | 2204 | } |
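/*
 * Illustrative use from a target helper (a sketch, not code from this
 * file): the cpu_*_mmu entry points take an explicit MemOpIdx and the
 * caller's return address so that a fault unwinds to the right
 * instruction, e.g.
 *
 *     MemOpIdx oi = make_memop_idx(MO_LEUL, cpu_mmu_index(env, false));
 *     uint32_t val = cpu_ldl_le_mmu(env, addr, oi, GETPC());
 *
 * As in cpu_load_helper() above, the plugin memory callback fires only
 * after the access has completed.
 */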
| 2205 | |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2206 | /* |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2207 | * Store Helpers |
| 2208 | */ |
| 2209 | |
Richard Henderson | c6b716c | 2019-09-10 12:02:36 -0400 | [diff] [blame] | 2210 | static inline void QEMU_ALWAYS_INLINE |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 2211 | store_memop(void *haddr, uint64_t val, MemOp op) |
| 2212 | { |
| 2213 | switch (op) { |
| 2214 | case MO_UB: |
| 2215 | stb_p(haddr, val); |
| 2216 | break; |
| 2217 | case MO_BEUW: |
| 2218 | stw_be_p(haddr, val); |
| 2219 | break; |
| 2220 | case MO_LEUW: |
| 2221 | stw_le_p(haddr, val); |
| 2222 | break; |
| 2223 | case MO_BEUL: |
| 2224 | stl_be_p(haddr, val); |
| 2225 | break; |
| 2226 | case MO_LEUL: |
| 2227 | stl_le_p(haddr, val); |
| 2228 | break; |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2229 | case MO_BEUQ: |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 2230 | stq_be_p(haddr, val); |
| 2231 | break; |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2232 | case MO_LEUQ: |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 2233 | stq_le_p(haddr, val); |
| 2234 | break; |
| 2235 | default: |
| 2236 | qemu_build_not_reached(); |
| 2237 | } |
| 2238 | } |
| 2239 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2240 | static void full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2241 | MemOpIdx oi, uintptr_t retaddr); |
| 2242 | |
Richard Henderson | 6b8b622 | 2020-07-26 15:39:53 -0700 | [diff] [blame] | 2243 | static void __attribute__((noinline)) |
| 2244 | store_helper_unaligned(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2245 | uintptr_t retaddr, size_t size, uintptr_t mmu_idx, |
| 2246 | bool big_endian) |
| 2247 | { |
| 2248 | const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); |
| 2249 | uintptr_t index, index2; |
| 2250 | CPUTLBEntry *entry, *entry2; |
| 2251 | target_ulong page2, tlb_addr, tlb_addr2; |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2252 | MemOpIdx oi; |
Richard Henderson | 6b8b622 | 2020-07-26 15:39:53 -0700 | [diff] [blame] | 2253 | size_t size2; |
| 2254 | int i; |
| 2255 | |
| 2256 | /* |
| 2257 | * Ensure the second page is in the TLB. Note that the first page |
| 2258 | * is already guaranteed to be filled, and that the second page |
| 2259 | * cannot evict the first. |
| 2260 | */ |
| 2261 | page2 = (addr + size) & TARGET_PAGE_MASK; |
| 2262 | size2 = (addr + size) & ~TARGET_PAGE_MASK; |
| 2263 | index2 = tlb_index(env, mmu_idx, page2); |
| 2264 | entry2 = tlb_entry(env, mmu_idx, page2); |
| 2265 | |
| 2266 | tlb_addr2 = tlb_addr_write(entry2); |
| 2267 | if (!tlb_hit_page(tlb_addr2, page2)) { |
| 2268 | if (!victim_tlb_hit(env, mmu_idx, index2, tlb_off, page2)) { |
| 2269 | tlb_fill(env_cpu(env), page2, size2, MMU_DATA_STORE, |
| 2270 | mmu_idx, retaddr); |
| 2271 | index2 = tlb_index(env, mmu_idx, page2); |
| 2272 | entry2 = tlb_entry(env, mmu_idx, page2); |
| 2273 | } |
| 2274 | tlb_addr2 = tlb_addr_write(entry2); |
| 2275 | } |
| 2276 | |
| 2277 | index = tlb_index(env, mmu_idx, addr); |
| 2278 | entry = tlb_entry(env, mmu_idx, addr); |
| 2279 | tlb_addr = tlb_addr_write(entry); |
| 2280 | |
| 2281 | /* |
| 2282 | * Handle watchpoints. Since this may trap, all checks |
| 2283 | * must happen before any store. |
| 2284 | */ |
| 2285 | if (unlikely(tlb_addr & TLB_WATCHPOINT)) { |
| 2286 | cpu_check_watchpoint(env_cpu(env), addr, size - size2, |
| 2287 | env_tlb(env)->d[mmu_idx].iotlb[index].attrs, |
| 2288 | BP_MEM_WRITE, retaddr); |
| 2289 | } |
| 2290 | if (unlikely(tlb_addr2 & TLB_WATCHPOINT)) { |
| 2291 | cpu_check_watchpoint(env_cpu(env), page2, size2, |
| 2292 | env_tlb(env)->d[mmu_idx].iotlb[index2].attrs, |
| 2293 | BP_MEM_WRITE, retaddr); |
| 2294 | } |
| 2295 | |
| 2296 | /* |
| 2297 | * XXX: not efficient, but simple. |
| 2298 | * This loop must go in the forward direction to avoid issues |
| 2299 |      * with self-modifying code in 64-bit Windows guests. |
| 2300 | */ |
| 2301 | oi = make_memop_idx(MO_UB, mmu_idx); |
| 2302 | if (big_endian) { |
| 2303 | for (i = 0; i < size; ++i) { |
| 2304 | /* Big-endian extract. */ |
| 2305 | uint8_t val8 = val >> (((size - 1) * 8) - (i * 8)); |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2306 | full_stb_mmu(env, addr + i, val8, oi, retaddr); |
Richard Henderson | 6b8b622 | 2020-07-26 15:39:53 -0700 | [diff] [blame] | 2307 | } |
| 2308 | } else { |
| 2309 | for (i = 0; i < size; ++i) { |
| 2310 | /* Little-endian extract. */ |
| 2311 | uint8_t val8 = val >> (i * 8); |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2312 | full_stb_mmu(env, addr + i, val8, oi, retaddr); |
Richard Henderson | 6b8b622 | 2020-07-26 15:39:53 -0700 | [diff] [blame] | 2313 | } |
| 2314 | } |
| 2315 | } |
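/*
 * Example of the byte extraction above: storing val == 0x11223344 with
 * size == 4 writes bytes 0x11 0x22 0x33 0x44 to addr+0..3 in the
 * big-endian case and 0x44 0x33 0x22 0x11 in the little-endian case.
 * Either way the bytes are emitted in ascending address order, which is
 * what the self-modifying-code note above requires.
 */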
| 2316 | |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 2317 | static inline void QEMU_ALWAYS_INLINE |
Richard Henderson | 4601f8d | 2019-04-25 21:12:59 -0700 | [diff] [blame] | 2318 | store_helper(CPUArchState *env, target_ulong addr, uint64_t val, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2319 | MemOpIdx oi, uintptr_t retaddr, MemOp op) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2320 | { |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2321 | const size_t tlb_off = offsetof(CPUTLBEntry, addr_write); |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 2322 | const unsigned a_bits = get_alignment_bits(get_memop(oi)); |
| 2323 | const size_t size = memop_size(op); |
| 2324 | uintptr_t mmu_idx = get_mmuidx(oi); |
| 2325 | uintptr_t index; |
| 2326 | CPUTLBEntry *entry; |
| 2327 | target_ulong tlb_addr; |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2328 | void *haddr; |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 2329 | |
| 2330 | tcg_debug_assert(mmu_idx < NB_MMU_MODES); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2331 | |
| 2332 | /* Handle CPU specific unaligned behaviour */ |
| 2333 | if (addr & ((1 << a_bits) - 1)) { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 2334 | cpu_unaligned_access(env_cpu(env), addr, MMU_DATA_STORE, |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2335 | mmu_idx, retaddr); |
| 2336 | } |
| 2337 | |
Richard Henderson | b826044 | 2022-04-01 11:08:13 -0600 | [diff] [blame] | 2338 | index = tlb_index(env, mmu_idx, addr); |
| 2339 | entry = tlb_entry(env, mmu_idx, addr); |
| 2340 | tlb_addr = tlb_addr_write(entry); |
| 2341 | |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2342 | /* If the TLB entry is for a different page, reload and try again. */ |
| 2343 | if (!tlb_hit(tlb_addr, addr)) { |
| 2344 | if (!victim_tlb_hit(env, mmu_idx, index, tlb_off, |
| 2345 | addr & TARGET_PAGE_MASK)) { |
Richard Henderson | 29a0af6 | 2019-03-22 16:07:18 -0700 | [diff] [blame] | 2346 | tlb_fill(env_cpu(env), addr, size, MMU_DATA_STORE, |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2347 | mmu_idx, retaddr); |
| 2348 | index = tlb_index(env, mmu_idx, addr); |
| 2349 | entry = tlb_entry(env, mmu_idx, addr); |
| 2350 | } |
| 2351 | tlb_addr = tlb_addr_write(entry) & ~TLB_INVALID_MASK; |
| 2352 | } |
| 2353 | |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 2354 | /* Handle anything that isn't just a straight memory access. */ |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2355 | if (unlikely(tlb_addr & ~TARGET_PAGE_MASK)) { |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 2356 | CPUIOTLBEntry *iotlbentry; |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 2357 | bool need_swap; |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 2358 | |
| 2359 | /* For anything that is unaligned, recurse through byte stores. */ |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2360 | if ((addr & (size - 1)) != 0) { |
| 2361 | goto do_unaligned_access; |
| 2362 | } |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 2363 | |
| 2364 | iotlbentry = &env_tlb(env)->d[mmu_idx].iotlb[index]; |
| 2365 | |
| 2366 | /* Handle watchpoints. */ |
| 2367 | if (unlikely(tlb_addr & TLB_WATCHPOINT)) { |
| 2368 | /* On watchpoint hit, this will longjmp out. */ |
| 2369 | cpu_check_watchpoint(env_cpu(env), addr, size, |
| 2370 | iotlbentry->attrs, BP_MEM_WRITE, retaddr); |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 2371 | } |
| 2372 | |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 2373 | need_swap = size > 1 && (tlb_addr & TLB_BSWAP); |
| 2374 | |
Richard Henderson | 50b107c | 2019-08-24 09:51:09 -0700 | [diff] [blame] | 2375 | /* Handle I/O access. */ |
Richard Henderson | 0856555 | 2019-09-18 09:15:44 -0700 | [diff] [blame] | 2376 | if (tlb_addr & TLB_MMIO) { |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 2377 | io_writex(env, iotlbentry, mmu_idx, val, addr, retaddr, |
| 2378 | op ^ (need_swap * MO_BSWAP)); |
| 2379 | return; |
| 2380 | } |
| 2381 | |
Richard Henderson | 7b0d792 | 2019-09-19 17:54:10 -0700 | [diff] [blame] | 2382 | /* Ignore writes to ROM. */ |
| 2383 | if (unlikely(tlb_addr & TLB_DISCARD_WRITE)) { |
| 2384 | return; |
| 2385 | } |
| 2386 | |
Richard Henderson | 0856555 | 2019-09-18 09:15:44 -0700 | [diff] [blame] | 2387 | /* Handle clean RAM pages. */ |
| 2388 | if (tlb_addr & TLB_NOTDIRTY) { |
Richard Henderson | 707526a | 2019-09-21 18:47:59 -0700 | [diff] [blame] | 2389 | notdirty_write(env_cpu(env), addr, size, iotlbentry, retaddr); |
Richard Henderson | 0856555 | 2019-09-18 09:15:44 -0700 | [diff] [blame] | 2390 | } |
| 2391 | |
Richard Henderson | 707526a | 2019-09-21 18:47:59 -0700 | [diff] [blame] | 2392 | haddr = (void *)((uintptr_t)addr + entry->addend); |
| 2393 | |
Richard Henderson | 5b87b3e | 2019-09-10 15:47:39 -0400 | [diff] [blame] | 2394 | /* |
| 2395 | * Keep these two store_memop separate to ensure that the compiler |
| 2396 | * is able to fold the entire function to a single instruction. |
| 2397 | * There is a build-time assert inside to remind you of this. ;-) |
| 2398 | */ |
| 2399 | if (unlikely(need_swap)) { |
| 2400 | store_memop(haddr, val, op ^ MO_BSWAP); |
| 2401 | } else { |
| 2402 | store_memop(haddr, val, op); |
| 2403 | } |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2404 | return; |
| 2405 | } |
| 2406 | |
| 2407 | /* Handle slow unaligned access (it spans two pages or IO). */ |
| 2408 | if (size > 1 |
| 2409 | && unlikely((addr & ~TARGET_PAGE_MASK) + size - 1 |
| 2410 | >= TARGET_PAGE_SIZE)) { |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2411 | do_unaligned_access: |
Richard Henderson | 6b8b622 | 2020-07-26 15:39:53 -0700 | [diff] [blame] | 2412 | store_helper_unaligned(env, addr, val, retaddr, size, |
| 2413 | mmu_idx, memop_big_endian(op)); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2414 | return; |
| 2415 | } |
| 2416 | |
| 2417 | haddr = (void *)((uintptr_t)addr + entry->addend); |
Richard Henderson | 80d9d1c | 2019-09-10 14:56:12 -0400 | [diff] [blame] | 2418 | store_memop(haddr, val, op); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2419 | } |
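/*
 * Slow-path ordering in store_helper(), as checked above: watchpoints
 * first (they may longjmp away before anything is written), then MMIO
 * via io_writex(), then discarded writes (ROM), then the clean-RAM
 * notdirty bookkeeping, and only then the host store with an optional
 * byte swap.
 */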
| 2420 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2421 | static void __attribute__((noinline)) |
| 2422 | full_stb_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2423 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2424 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2425 | validate_memop(oi, MO_UB); |
Tony Nguyen | be5c478 | 2019-08-24 04:36:53 +1000 | [diff] [blame] | 2426 | store_helper(env, addr, val, oi, retaddr, MO_UB); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2427 | } |
| 2428 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2429 | void helper_ret_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, |
| 2430 | MemOpIdx oi, uintptr_t retaddr) |
| 2431 | { |
| 2432 | full_stb_mmu(env, addr, val, oi, retaddr); |
| 2433 | } |
| 2434 | |
| 2435 | static void full_le_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2436 | MemOpIdx oi, uintptr_t retaddr) |
| 2437 | { |
| 2438 | validate_memop(oi, MO_LEUW); |
| 2439 | store_helper(env, addr, val, oi, retaddr, MO_LEUW); |
| 2440 | } |
| 2441 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2442 | void helper_le_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2443 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2444 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2445 | full_le_stw_mmu(env, addr, val, oi, retaddr); |
| 2446 | } |
| 2447 | |
| 2448 | static void full_be_stw_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2449 | MemOpIdx oi, uintptr_t retaddr) |
| 2450 | { |
| 2451 | validate_memop(oi, MO_BEUW); |
| 2452 | store_helper(env, addr, val, oi, retaddr, MO_BEUW); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2453 | } |
| 2454 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2455 | void helper_be_stw_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2456 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2457 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2458 | full_be_stw_mmu(env, addr, val, oi, retaddr); |
| 2459 | } |
| 2460 | |
| 2461 | static void full_le_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2462 | MemOpIdx oi, uintptr_t retaddr) |
| 2463 | { |
| 2464 | validate_memop(oi, MO_LEUL); |
| 2465 | store_helper(env, addr, val, oi, retaddr, MO_LEUL); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2466 | } |
| 2467 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2468 | void helper_le_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2469 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2470 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2471 | full_le_stl_mmu(env, addr, val, oi, retaddr); |
| 2472 | } |
| 2473 | |
| 2474 | static void full_be_stl_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2475 | MemOpIdx oi, uintptr_t retaddr) |
| 2476 | { |
| 2477 | validate_memop(oi, MO_BEUL); |
| 2478 | store_helper(env, addr, val, oi, retaddr, MO_BEUL); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2479 | } |
| 2480 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2481 | void helper_be_stl_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2482 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2483 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2484 | full_be_stl_mmu(env, addr, val, oi, retaddr); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2485 | } |
| 2486 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2487 | void helper_le_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2488 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2489 | { |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2490 | validate_memop(oi, MO_LEUQ); |
| 2491 | store_helper(env, addr, val, oi, retaddr, MO_LEUQ); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2492 | } |
| 2493 | |
Richard Henderson | fc1bc77 | 2019-04-25 20:01:37 -0700 | [diff] [blame] | 2494 | void helper_be_stq_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2495 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2496 | { |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2497 | validate_memop(oi, MO_BEUQ); |
| 2498 | store_helper(env, addr, val, oi, retaddr, MO_BEUQ); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2499 | } |
Paolo Bonzini | 0f590e74 | 2014-03-28 17:55:24 +0100 | [diff] [blame] | 2500 | |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2501 | /* |
| 2502 | * Store Helpers for cpu_ldst.h |
| 2503 | */ |
| 2504 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2505 | typedef void FullStoreHelper(CPUArchState *env, target_ulong addr, |
| 2506 | uint64_t val, MemOpIdx oi, uintptr_t retaddr); |
| 2507 | |
| 2508 | static inline void cpu_store_helper(CPUArchState *env, target_ulong addr, |
| 2509 | uint64_t val, MemOpIdx oi, uintptr_t ra, |
| 2510 | FullStoreHelper *full_store) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2511 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2512 | full_store(env, addr, val, oi, ra); |
Richard Henderson | 37aff08 | 2021-07-26 11:48:30 -1000 | [diff] [blame] | 2513 | qemu_plugin_vcpu_mem_cb(env_cpu(env), addr, oi, QEMU_PLUGIN_MEM_W); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2514 | } |
| 2515 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2516 | void cpu_stb_mmu(CPUArchState *env, target_ulong addr, uint8_t val, |
| 2517 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2518 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2519 | cpu_store_helper(env, addr, val, oi, retaddr, full_stb_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2520 | } |
| 2521 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2522 | void cpu_stw_be_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
| 2523 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2524 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2525 | cpu_store_helper(env, addr, val, oi, retaddr, full_be_stw_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2526 | } |
| 2527 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2528 | void cpu_stl_be_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
| 2529 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2530 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2531 | cpu_store_helper(env, addr, val, oi, retaddr, full_be_stl_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2532 | } |
| 2533 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2534 | void cpu_stq_be_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2535 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2536 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2537 | cpu_store_helper(env, addr, val, oi, retaddr, helper_be_stq_mmu); |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2538 | } |
| 2539 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2540 | void cpu_stw_le_mmu(CPUArchState *env, target_ulong addr, uint16_t val, |
| 2541 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2542 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2543 | cpu_store_helper(env, addr, val, oi, retaddr, full_le_stw_mmu); |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2544 | } |
| 2545 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2546 | void cpu_stl_le_mmu(CPUArchState *env, target_ulong addr, uint32_t val, |
| 2547 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2548 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2549 | cpu_store_helper(env, addr, val, oi, retaddr, full_le_stl_mmu); |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2550 | } |
| 2551 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2552 | void cpu_stq_le_mmu(CPUArchState *env, target_ulong addr, uint64_t val, |
| 2553 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | b9e6025 | 2020-05-08 08:43:46 -0700 | [diff] [blame] | 2554 | { |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2555 | cpu_store_helper(env, addr, val, oi, retaddr, helper_le_stq_mmu); |
Richard Henderson | d03f140 | 2019-12-09 13:49:58 -0800 | [diff] [blame] | 2556 | } |
| 2557 | |
Richard Henderson | f83bcec | 2021-07-27 07:48:55 -1000 | [diff] [blame] | 2558 | #include "ldst_common.c.inc" |
Richard Henderson | cfe04a4 | 2019-12-11 10:33:26 -0800 | [diff] [blame] | 2559 | |
Richard Henderson | be9568b | 2021-07-16 14:20:49 -0700 | [diff] [blame] | 2560 | /* |
| 2561 |  * These functions pass in OI and RETADDR explicitly, |
| 2562 |  * which makes them callable from other helpers. |
| 2563 | */ |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 2564 | |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 2565 | #define ATOMIC_NAME(X) \ |
Richard Henderson | be9568b | 2021-07-16 14:20:49 -0700 | [diff] [blame] | 2566 | glue(glue(glue(cpu_atomic_ ## X, SUFFIX), END), _mmu) |
Richard Henderson | a754f7f | 2021-07-16 17:49:09 -0700 | [diff] [blame] | 2567 | |
Richard Henderson | 707526a | 2019-09-21 18:47:59 -0700 | [diff] [blame] | 2568 | #define ATOMIC_MMU_CLEANUP |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 2569 | |
Paolo Bonzini | 139c183 | 2020-02-04 12:41:01 +0100 | [diff] [blame] | 2570 | #include "atomic_common.c.inc" |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 2571 | |
| 2572 | #define DATA_SIZE 1 |
| 2573 | #include "atomic_template.h" |
| 2574 | |
| 2575 | #define DATA_SIZE 2 |
| 2576 | #include "atomic_template.h" |
| 2577 | |
| 2578 | #define DATA_SIZE 4 |
| 2579 | #include "atomic_template.h" |
| 2580 | |
Richard Henderson | df79b99 | 2016-09-02 12:23:57 -0700 | [diff] [blame] | 2581 | #ifdef CONFIG_ATOMIC64 |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 2582 | #define DATA_SIZE 8 |
| 2583 | #include "atomic_template.h" |
Richard Henderson | df79b99 | 2016-09-02 12:23:57 -0700 | [diff] [blame] | 2584 | #endif |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 2585 | |
Richard Henderson | e6cd4bb | 2018-08-15 16:31:47 -0700 | [diff] [blame] | 2586 | #if HAVE_CMPXCHG128 || HAVE_ATOMIC128 |
Richard Henderson | 7ebee43 | 2016-06-29 21:10:59 -0700 | [diff] [blame] | 2587 | #define DATA_SIZE 16 |
| 2588 | #include "atomic_template.h" |
| 2589 | #endif |
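/*
 * Each inclusion of "atomic_template.h" instantiates the cpu_atomic_*
 * helpers for the operand size selected by DATA_SIZE; the template is
 * expected to undefine DATA_SIZE again so that it can be included once
 * per size.  The 8- and 16-byte variants are built only when the host
 * provides the corresponding atomic or cmpxchg primitives.
 */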
| 2590 | |
Richard Henderson | c482cb1 | 2016-06-28 11:37:27 -0700 | [diff] [blame] | 2591 | /* Code access functions. */ |
| 2592 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2593 | static uint64_t full_ldub_code(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2594 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2595 | { |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2596 | return load_helper(env, addr, oi, retaddr, MO_8, true, full_ldub_code); |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2597 | } |
| 2598 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2599 | uint32_t cpu_ldub_code(CPUArchState *env, abi_ptr addr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2600 | { |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2601 | MemOpIdx oi = make_memop_idx(MO_UB, cpu_mmu_index(env, true)); |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2602 | return full_ldub_code(env, addr, oi, 0); |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2603 | } |
| 2604 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2605 | static uint64_t full_lduw_code(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2606 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | 4cef72d | 2019-10-21 16:09:10 +0100 | [diff] [blame] | 2607 | { |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2608 | return load_helper(env, addr, oi, retaddr, MO_TEUW, true, full_lduw_code); |
Alex Bennée | 4cef72d | 2019-10-21 16:09:10 +0100 | [diff] [blame] | 2609 | } |
| 2610 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2611 | uint32_t cpu_lduw_code(CPUArchState *env, abi_ptr addr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2612 | { |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2613 | MemOpIdx oi = make_memop_idx(MO_TEUW, cpu_mmu_index(env, true)); |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2614 | return full_lduw_code(env, addr, oi, 0); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2615 | } |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 2616 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2617 | static uint64_t full_ldl_code(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2618 | MemOpIdx oi, uintptr_t retaddr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2619 | { |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2620 | return load_helper(env, addr, oi, retaddr, MO_TEUL, true, full_ldl_code); |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2621 | } |
| 2622 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2623 | uint32_t cpu_ldl_code(CPUArchState *env, abi_ptr addr) |
Alex Bennée | 4cef72d | 2019-10-21 16:09:10 +0100 | [diff] [blame] | 2624 | { |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2625 | MemOpIdx oi = make_memop_idx(MO_TEUL, cpu_mmu_index(env, true)); |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2626 | return full_ldl_code(env, addr, oi, 0); |
Alex Bennée | 4cef72d | 2019-10-21 16:09:10 +0100 | [diff] [blame] | 2627 | } |
| 2628 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2629 | static uint64_t full_ldq_code(CPUArchState *env, target_ulong addr, |
Richard Henderson | 9002ffc | 2021-07-25 12:06:49 -1000 | [diff] [blame] | 2630 | MemOpIdx oi, uintptr_t retaddr) |
Richard Henderson | 2dd9260 | 2019-04-25 20:48:57 -0700 | [diff] [blame] | 2631 | { |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2632 | return load_helper(env, addr, oi, retaddr, MO_TEUQ, true, full_ldq_code); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2633 | } |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 2634 | |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2635 | uint64_t cpu_ldq_code(CPUArchState *env, abi_ptr addr) |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2636 | { |
Frédéric Pétrot | fc313c6 | 2022-01-06 22:00:51 +0100 | [diff] [blame] | 2637 | MemOpIdx oi = make_memop_idx(MO_TEUQ, cpu_mmu_index(env, true)); |
Richard Henderson | fc4120a | 2019-12-11 11:25:10 -0800 | [diff] [blame] | 2638 | return full_ldq_code(env, addr, oi, 0); |
Alex Bennée | eed5664 | 2019-02-15 14:31:13 +0000 | [diff] [blame] | 2639 | } |
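/*
 * The cpu_ld*_code functions above fetch instructions for translation:
 * they go through the code-read path of load_helper() with
 * MMU_INST_FETCH semantics and pass a zero return address, since there
 * is no TCG-generated caller to unwind when they are invoked from the
 * translator.
 */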