/*
 * internal execution defines for qemu
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef EXEC_ALL_H
#define EXEC_ALL_H

#include "exec/tb-context.h"
#include "sysemu/cpus.h"

/* Allow inspection of translation results - the slowdown should be negligible, so we leave it enabled */
#define DEBUG_DISAS

/* Page tracking code uses ram addresses in system mode, and virtual
   addresses in userspace mode.  Define tb_page_addr_t to be an appropriate
   type. */
#if defined(CONFIG_USER_ONLY)
typedef abi_ulong tb_page_addr_t;
#define TB_PAGE_ADDR_FMT TARGET_ABI_FMT_lx
#else
typedef ram_addr_t tb_page_addr_t;
#define TB_PAGE_ADDR_FMT RAM_ADDR_FMT
#endif
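
/*
 * A minimal usage sketch (an illustration, not code from this header):
 * the format macro lets code log a tb_page_addr_t portably in both
 * user-only and system-mode builds.
 *
 *     qemu_log("invalidating TB page at " TB_PAGE_ADDR_FMT "\n", page_addr);
 */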

#include "qemu/log.h"

void gen_intermediate_code(CPUState *cpu, TranslationBlock *tb, int max_insns);
void restore_state_to_opc(CPUArchState *env, TranslationBlock *tb,
                          target_ulong *data);

void cpu_gen_init(void);

/**
 * cpu_restore_state:
 * @cpu: the vCPU whose state is to be restored
 * @searched_pc: the host PC the fault occurred at
 * @will_exit: true if the TB executed will be interrupted after some
 *             cpu adjustments. Required for maintaining the correct
 *             icount values
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If the searched_pc is not in translated code no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc, bool will_exit);
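
/*
 * A minimal usage sketch (hypothetical target code, not part of this
 * header): a helper that detects a fault can resynchronise the guest
 * CPU state from the host return address before raising an exception.
 * EXCP_ALIGN and env_cpu() are assumptions for illustration; targets
 * use their own exception numbers and CPUState accessors.
 *
 *     void helper_check_align(CPUArchState *env, target_ulong addr)
 *     {
 *         if (addr & 3) {
 *             CPUState *cs = env_cpu(env);           // assumed accessor
 *             cpu_restore_state(cs, GETPC(), true);  // resync pc/flags/icount
 *             cs->exception_index = EXCP_ALIGN;      // hypothetical exception
 *             cpu_loop_exit(cs);
 *         }
 *     }
 */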

void QEMU_NORETURN cpu_loop_exit_noexc(CPUState *cpu);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base,
                              uint32_t flags,
                              int cflags);

void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
void QEMU_NORETURN cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);
void QEMU_NORETURN cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);

#if !defined(CONFIG_USER_ONLY)
void cpu_reloading_memory_map(void);
/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
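
/*
 * A usage sketch (hypothetical board/CPU realize code, not from this
 * header): register two address spaces for a CPU, e.g. a normal and a
 * "secure" view, before the CPU starts executing.  The function and
 * region names are assumptions for illustration.
 *
 *     static void my_cpu_init_ases(CPUState *cs, MemoryRegion *sysmem,
 *                                  MemoryRegion *secure_mem)
 *     {
 *         cs->num_ases = 2;                      // must be set before the first call
 *         cpu_address_space_init(cs, 0, "cpu-memory", sysmem);
 *         cpu_address_space_init(cs, 1, "cpu-secure-memory", secure_mem);
 *     }
 */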
#endif

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_TCG)
/* cputlb.c */
/**
 * tlb_init - initialize a CPU's TLB
 * @cpu: CPU whose TLB should be initialized
 */
void tlb_init(CPUState *cpu);
/**
 * tlb_flush_page:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of the specified CPU, for all
 * MMU indexes.
 */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
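/*
 * A minimal sketch (hypothetical target MMU code, not from this header):
 * after the guest invalidates one of its page table entries, drop any
 * stale translation for that virtual page from the softmmu TLB.
 * env_cpu() is an assumed accessor for the CPUState.
 *
 *     void helper_invalidate_pte(CPUArchState *env, target_ulong va)
 *     {
 *         tlb_flush_page(env_cpu(env), va & TARGET_PAGE_MASK);
 *     }
 */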
/**
 * tlb_flush_page_all_cpus:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all
 * MMU indexes.
 */
void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr);
/**
 * tlb_flush_page_all_cpus_synced:
 * @src: source CPU of the flush
 * @addr: virtual address of page to be flushed
 *
 * Flush one page from the TLB of all CPUs, for all MMU indexes,
 * like tlb_flush_page_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_page_all_cpus_synced(CPUState *src, target_ulong addr);
/**
 * tlb_flush:
 * @cpu: CPU whose TLB should be flushed
 *
 * Flush the entire TLB for the specified CPU. Most CPU architectures
 * allow the implementation to drop entries from the TLB at any time
 * so this is generally safe. If more selective flushing is required
 * use one of the other functions for efficiency.
 */
void tlb_flush(CPUState *cpu);
/**
 * tlb_flush_all_cpus:
 * @src_cpu: source CPU of the flush
 */
void tlb_flush_all_cpus(CPUState *src_cpu);
/**
 * tlb_flush_all_cpus_synced:
 * @src_cpu: source CPU of the flush
 *
 * Like tlb_flush_all_cpus, except that the source vCPU's work is
 * scheduled as safe work, meaning all flushes will be complete once
 * the source vCPU's safe work is complete. This will depend on when
 * the guest's translation ends the TB.
 */
void tlb_flush_all_cpus_synced(CPUState *src_cpu);
/**
 * tlb_flush_page_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx(CPUState *cpu, target_ulong addr,
                              uint16_t idxmap);
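/*
 * A brief sketch (hypothetical target code, not from this header):
 * @idxmap is a bitmap, so each MMU index to be flushed contributes one
 * bit.  Given a CPUState *cs and a target_ulong addr, and two
 * hypothetical MMU index names:
 *
 *     uint16_t idxmap = (1 << MMU_KERNEL_IDX) | (1 << MMU_USER_IDX);
 *     tlb_flush_page_by_mmuidx(cs, addr, idxmap);
 */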
/**
 * tlb_flush_page_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu, target_ulong addr,
                                       uint16_t idxmap);
/**
 * tlb_flush_page_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @addr: virtual address of page to be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush one page from the TLB of all CPUs, for the specified MMU
 * indexes, like tlb_flush_page_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu, target_ulong addr,
                                              uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx:
 * @cpu: CPU whose TLB should be flushed
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from the TLB of the specified CPU, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes.
 */
void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap);
/**
 * tlb_flush_by_mmuidx_all_cpus_synced:
 * @cpu: Originating CPU of the flush
 * @idxmap: bitmap of MMU indexes to flush
 *
 * Flush all entries from all TLBs of all CPUs, for the specified
 * MMU indexes, like tlb_flush_by_mmuidx_all_cpus, except that the
 * source vCPU's work is scheduled as safe work, meaning all flushes
 * will be complete once the source vCPU's safe work is complete. This
 * will depend on when the guest's translation ends the TB.
 */
void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu, uint16_t idxmap);
| 228 | /** |
Peter Maydell | 1787cc8 | 2016-01-21 14:15:04 +0000 | [diff] [blame] | 229 | * tlb_set_page_with_attrs: |
| 230 | * @cpu: CPU to add this TLB entry for |
| 231 | * @vaddr: virtual address of page to add entry for |
| 232 | * @paddr: physical address of the page |
| 233 | * @attrs: memory transaction attributes |
| 234 | * @prot: access permissions (PAGE_READ/PAGE_WRITE/PAGE_EXEC bits) |
| 235 | * @mmu_idx: MMU index to insert TLB entry for |
| 236 | * @size: size of the page in bytes |
| 237 | * |
| 238 | * Add an entry to this CPU's TLB (a mapping from virtual address |
| 239 | * @vaddr to physical address @paddr) with the specified memory |
| 240 | * transaction attributes. This is generally called by the target CPU |
| 241 | * specific code after it has been called through the tlb_fill() |
| 242 | * entry point and performed a successful page table walk to find |
| 243 | * the physical address and attributes for the virtual address |
| 244 | * which provoked the TLB miss. |
| 245 | * |
| 246 | * At most one entry for a given virtual address is permitted. Only a |
| 247 | * single TARGET_PAGE_SIZE region is mapped; the supplied @size is only |
| 248 | * used by tlb_flush_page. |
| 249 | */ |
Peter Maydell | fadc1cb | 2015-04-26 16:49:24 +0100 | [diff] [blame] | 250 | void tlb_set_page_with_attrs(CPUState *cpu, target_ulong vaddr, |
| 251 | hwaddr paddr, MemTxAttrs attrs, |
| 252 | int prot, int mmu_idx, target_ulong size); |
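/*
 * A usage sketch (hypothetical target tlb_fill code, not from this
 * header): after a successful page table walk, install the translation
 * with the attributes returned by the walk.  The walker function, its
 * output variables and the surrounding context are assumptions for
 * illustration only.
 *
 *     hwaddr paddr;
 *     int prot;
 *     MemTxAttrs attrs;
 *
 *     if (my_page_table_walk(env, vaddr, access_type, mmu_idx,
 *                            &paddr, &prot, &attrs)) {
 *         tlb_set_page_with_attrs(cs, vaddr & TARGET_PAGE_MASK,
 *                                 paddr & TARGET_PAGE_MASK, attrs,
 *                                 prot, mmu_idx, TARGET_PAGE_SIZE);
 *     }
 */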
/* tlb_set_page:
 *
 * This function is equivalent to calling tlb_set_page_with_attrs()
 * with an @attrs argument of MEMTXATTRS_UNSPECIFIED. It's provided
 * as a convenience for CPUs which don't use memory transaction attributes.
 */
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void probe_write(CPUArchState *env, target_ulong addr, int size, int mmu_idx,
                 uintptr_t retaddr);
#else
static inline void tlb_init(CPUState *cpu)
{
}
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus(CPUState *src, target_ulong addr)
{
}
static inline void tlb_flush_page_all_cpus_synced(CPUState *src,
                                                  target_ulong addr)
{
}
static inline void tlb_flush(CPUState *cpu)
{
}
static inline void tlb_flush_all_cpus(CPUState *src_cpu)
{
}
static inline void tlb_flush_all_cpus_synced(CPUState *src_cpu)
{
}
static inline void tlb_flush_page_by_mmuidx(CPUState *cpu,
                                            target_ulong addr, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx(CPUState *cpu, uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus(CPUState *cpu,
                                                     target_ulong addr,
                                                     uint16_t idxmap)
{
}
static inline void tlb_flush_page_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                            target_ulong addr,
                                                            uint16_t idxmap)
{
}
static inline void tlb_flush_by_mmuidx_all_cpus(CPUState *cpu, uint16_t idxmap)
{
}

static inline void tlb_flush_by_mmuidx_all_cpus_synced(CPUState *cpu,
                                                       uint16_t idxmap)
{
}
#endif

#define CODE_GEN_ALIGN 16 /* must be >= the size of an icache line */

/* Estimated block size for TB allocation. */
/* ??? The following is based on a 2015 survey of x86_64 host output.
   Better would seem to be some sort of dynamically sized TB array,
   adapting to the block sizes actually being produced. */
#if defined(CONFIG_SOFTMMU)
#define CODE_GEN_AVG_BLOCK_SIZE 400
#else
#define CODE_GEN_AVG_BLOCK_SIZE 150
#endif

/*
 * Translation Cache-related fields of a TB.
 * This struct exists just for convenience; we keep track of TBs in a binary
 * search tree, and the only fields needed to compare TBs in the tree are
 * @ptr and @size.
 * Note: the address of search data can be obtained by adding @size to @ptr.
 */
struct tb_tc {
    void *ptr;    /* pointer to the translated code */
    size_t size;
};
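
/*
 * An illustrative sketch (an assumption about how such a tree lookup
 * can work, not the code QEMU actually uses): a host PC is mapped back
 * to its TB by comparing it against each node's [ptr, ptr + size) range.
 *
 *     static int tb_tc_ptr_cmp(const struct tb_tc *tc, uintptr_t host_pc)
 *     {
 *         uintptr_t start = (uintptr_t)tc->ptr;
 *
 *         if (host_pc < start) {
 *             return -1;                    // search the left subtree
 *         } else if (host_pc >= start + tc->size) {
 *             return 1;                     // search the right subtree
 *         }
 *         return 0;                         // host_pc falls inside this TB
 *     }
 */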

struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint32_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t icount;
    uint32_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x00007fff
#define CF_LAST_IO     0x00008000 /* Last insn may be an IO access.  */
#define CF_NOCACHE     0x00010000 /* To be freed after execution */
#define CF_USE_ICOUNT  0x00020000
#define CF_INVALID     0x00040000 /* TB is stale. Set with @jmp_lock held */
#define CF_PARALLEL    0x00080000 /* Generate code for a parallel context */
#define CF_CLUSTER_MASK 0xff000000 /* Top 8 bits are cluster ID */
#define CF_CLUSTER_SHIFT 24
/* cflags' mask for hashing/comparison */
#define CF_HASH_MASK   \
    (CF_COUNT_MASK | CF_LAST_IO | CF_USE_ICOUNT | CF_PARALLEL | CF_CLUSTER_MASK)

    /* Per-vCPU dynamic tracing state used to generate this TB */
    uint32_t trace_vcpu_dstate;

    struct tb_tc tc;

    /* original tb when cflags has CF_NOCACHE */
    struct TranslationBlock *orig_tb;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[].
       The list is protected by the TB's page('s) lock(s) */
    uintptr_t page_next[2];
    tb_page_addr_t page_addr[2];

    /* jmp_lock placed here to fill a 4-byte hole. Its documentation is below */
    QemuSpin jmp_lock;

    /* The following data are used to directly call another TB from
     * the code of this one. This can be done either by emitting direct or
     * indirect native jump instructions. These jumps are reset so that the TB
     * just continues its execution. The TB can be linked to another one by
     * setting one of the jump targets (or patching the jump instruction). Only
     * two such jumps are supported.
     */
    uint16_t jmp_reset_offset[2]; /* offset of original jump target */
#define TB_JMP_RESET_OFFSET_INVALID 0xffff /* indicates no jump generated */
    uintptr_t jmp_target_arg[2];  /* target address or offset */

    /*
     * Each TB has a NULL-terminated list (jmp_list_head) of incoming jumps.
     * Each TB can have two outgoing jumps, and therefore can participate
     * in two lists. The list entries are kept in jmp_list_next[2]. The least
     * significant bit (LSB) of the pointers in these lists is used to encode
     * which of the two list entries is to be used in the pointed TB.
     *
     * List traversals are protected by jmp_lock. The destination TB of each
     * outgoing jump is kept in jmp_dest[] so that the appropriate jmp_lock
     * can be acquired from any origin TB.
     *
     * jmp_dest[] are tagged pointers as well. The LSB is set when the TB is
     * being invalidated, so that no further outgoing jumps from it can be set.
     *
     * jmp_lock also protects the CF_INVALID cflag; a jump must not be chained
     * to a destination TB that has CF_INVALID set.
     */
    uintptr_t jmp_list_head;
    uintptr_t jmp_list_next[2];
    uintptr_t jmp_dest[2];
};
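
/*
 * An illustrative sketch (an assumption for clarity, not code from this
 * header): because the low bit of jmp_dest[] and jmp_list_next[] carries
 * extra state, a reader masks it out before dereferencing and can test
 * the tag separately.  For jmp_dest[], a set LSB means the owning TB is
 * being invalidated.
 *
 *     uintptr_t ent = atomic_read(&tb->jmp_dest[0]);
 *     bool invalidating = ent & 1;                         // LSB is the tag bit
 *     TranslationBlock *dest = (TranslationBlock *)(ent & ~(uintptr_t)1);
 *     if (dest != NULL && !invalidating) {
 *         // the chained jump to 'dest' is still meaningful
 *     }
 */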

extern bool parallel_cpus;

/* Hide the atomic_read to make code a little easier on the eyes */
static inline uint32_t tb_cflags(const TranslationBlock *tb)
{
    return atomic_read(&tb->cflags);
}

/* current cflags for hashing/comparison */
static inline uint32_t curr_cflags(void)
{
    return (parallel_cpus ? CF_PARALLEL : 0)
         | (use_icount ? CF_USE_ICOUNT : 0);
}

/* TranslationBlock invalidate API */
#if defined(CONFIG_USER_ONLY)
void tb_invalidate_phys_addr(target_ulong addr);
void tb_invalidate_phys_range(target_ulong start, target_ulong end);
#else
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr, MemTxAttrs attrs);
#endif
void tb_flush(CPUState *cpu);
void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask);
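
/*
 * A brief sketch (an assumption for illustration, not code from this
 * header): a lookup for an existing translation combines the guest CPU
 * state with the current compile flags, so that e.g. TBs generated for
 * a parallel context are not reused in a non-parallel one.  Given a
 * CPUState *cpu and its CPUArchState *env:
 *
 *     target_ulong pc, cs_base;
 *     uint32_t flags;
 *
 *     cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);   // per-target helper
 *     TranslationBlock *tb =
 *         tb_htable_lookup(cpu, pc, cs_base, flags, curr_cflags());
 *     if (tb == NULL) {
 *         tb = tb_gen_code(cpu, pc, cs_base, flags, curr_cflags());
 *     }
 */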
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr);

/* GETPC is the true target of the return instruction that we'll execute. */
#if defined(CONFIG_TCG_INTERPRETER)
extern uintptr_t tci_tb_ptr;
# define GETPC() tci_tb_ptr
#else
# define GETPC() \
    ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
#endif

/* The true return address will often point to a host insn that is part of
   the next translated guest insn.  Adjust the address backward to point to
   the middle of the call insn.  Subtracting one would do the job except for
   several compressed mode architectures (arm, mips) which set the low bit
   to indicate the compressed mode; subtracting two works around that.  It
   is also the case that there are no host isas that contain a call insn
   smaller than 4 bytes, so we don't worry about special-casing this. */
#define GETPC_ADJ   2
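
/*
 * A small usage sketch (hypothetical helper, not from this header, and
 * only meaningful in softmmu builds where probe_write() above is
 * declared): helpers commonly capture GETPC() once on entry and pass it
 * down as the retaddr used for state restoration on faults.
 * cpu_mmu_index() is the per-target MMU index accessor.
 *
 *     void helper_check_store(CPUArchState *env, target_ulong addr)
 *     {
 *         uintptr_t ra = GETPC();
 *
 *         probe_write(env, addr, 1, cpu_mmu_index(env, false), ra);
 *     }
 */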

#if !defined(CONFIG_USER_ONLY) && defined(CONFIG_DEBUG_TCG)
void assert_no_pages_locked(void);
#else
static inline void assert_no_pages_locked(void)
{
}
#endif

#if !defined(CONFIG_USER_ONLY)

/**
 * iotlb_to_section:
 * @cpu: CPU performing the access
 * @index: TCG CPU IOTLB entry
 *
 * Given a TCG CPU IOTLB entry, return the MemoryRegionSection that
 * it refers to. @index will have been initially created and returned
 * by memory_region_section_get_iotlb().
 */
struct MemoryRegionSection *iotlb_to_section(CPUState *cpu,
                                             hwaddr index, MemTxAttrs attrs);
#endif

#if defined(CONFIG_USER_ONLY)
void mmap_lock(void);
void mmap_unlock(void);
bool have_mmap_lock(void);

static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
#else
static inline void mmap_lock(void) {}
static inline void mmap_unlock(void) {}

/* cputlb.c */
tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);

void tlb_reset_dirty(CPUState *cpu, ram_addr_t start1, ram_addr_t length);
void tlb_set_dirty(CPUState *cpu, target_ulong vaddr);

/* exec.c */
void tb_flush_jmp_cache(CPUState *cpu, target_ulong addr);

MemoryRegionSection *
address_space_translate_for_iotlb(CPUState *cpu, int asidx, hwaddr addr,
                                  hwaddr *xlat, hwaddr *plen,
                                  MemTxAttrs attrs, int *prot);
hwaddr memory_region_section_get_iotlb(CPUState *cpu,
                                       MemoryRegionSection *section,
                                       target_ulong vaddr,
                                       hwaddr paddr, hwaddr xlat,
                                       int prot,
                                       target_ulong *address);
#endif

/* vl.c */
extern int singlestep;

#endif