/*
 * CPU interfaces that are target independent.
 *
 * Copyright (c) 2003 Fabrice Bellard
 *
 * SPDX-License-Identifier: LGPL-2.1+
 */
#ifndef CPU_COMMON_H
#define CPU_COMMON_H

#include "exec/vaddr.h"
#ifndef CONFIG_USER_ONLY
#include "exec/hwaddr.h"
#endif
#include "hw/core/cpu.h"
#include "tcg/debug-assert.h"
#include "exec/page-protection.h"

#define EXCP_INTERRUPT 0x10000 /* async interruption */
#define EXCP_HLT       0x10001 /* hlt instruction reached */
#define EXCP_DEBUG     0x10002 /* cpu stopped after a breakpoint or singlestep */
#define EXCP_HALTED    0x10003 /* cpu is halted (waiting for external event) */
#define EXCP_YIELD     0x10004 /* cpu wants to yield timeslice to another */
#define EXCP_ATOMIC    0x10005 /* stop-the-world and emulate atomic */

void cpu_exec_init_all(void);
void cpu_exec_step_atomic(CPUState *cpu);

#define REAL_HOST_PAGE_ALIGN(addr) ROUND_UP((addr), qemu_real_host_page_size())
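/*
 * For instance (a sketch, assuming a 4 KiB host page size):
 * REAL_HOST_PAGE_ALIGN(0x1234) evaluates to 0x2000, i.e. the address
 * rounded up to the next host-page boundary.
 */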

/* The CPU list lock nests outside page_(un)lock or mmap_(un)lock */
extern QemuMutex qemu_cpu_list_lock;
void qemu_init_cpu_list(void);
void cpu_list_lock(void);
void cpu_list_unlock(void);
unsigned int cpu_list_generation_id_get(void);

int cpu_get_free_index(void);

void tcg_iommu_init_notifier_list(CPUState *cpu);
void tcg_iommu_free_notifier_list(CPUState *cpu);

#if !defined(CONFIG_USER_ONLY)

enum device_endian {
    DEVICE_NATIVE_ENDIAN,
    DEVICE_BIG_ENDIAN,
    DEVICE_LITTLE_ENDIAN,
};

#if HOST_BIG_ENDIAN
#define DEVICE_HOST_ENDIAN DEVICE_BIG_ENDIAN
#else
#define DEVICE_HOST_ENDIAN DEVICE_LITTLE_ENDIAN
#endif

/* address in the RAM (different from a physical address) */
#if defined(CONFIG_XEN_BACKEND)
typedef uint64_t ram_addr_t;
# define RAM_ADDR_MAX UINT64_MAX
# define RAM_ADDR_FMT "%" PRIx64
#else
typedef uintptr_t ram_addr_t;
# define RAM_ADDR_MAX UINTPTR_MAX
# define RAM_ADDR_FMT "%" PRIxPTR
#endif
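/*
 * RAM_ADDR_FMT carries its own '%', so it is spliced directly into format
 * strings. A minimal sketch (host_ptr is a hypothetical host pointer into
 * guest RAM; error_report() is from "qemu/error-report.h"):
 *
 *    ram_addr_t addr = qemu_ram_addr_from_host_nofail(host_ptr);
 *    error_report("RAM address 0x" RAM_ADDR_FMT, addr);
 */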

/* memory API */

void qemu_ram_remap(ram_addr_t addr, ram_addr_t length);
/* This should not be used by devices. */
ram_addr_t qemu_ram_addr_from_host(void *ptr);
ram_addr_t qemu_ram_addr_from_host_nofail(void *ptr);
RAMBlock *qemu_ram_block_by_name(const char *name);

/*
 * Translates a host ptr back to a RAMBlock and an offset in that RAMBlock.
 *
 * @ptr: The host pointer to translate.
 * @round_offset: Whether to round the result offset down to a target page
 *                boundary.
 * @offset: Will be set to the offset within the returned RAMBlock.
 *
 * Returns: RAMBlock (or NULL if not found)
 *
 * By the time this function returns, the returned pointer is not protected
 * by RCU anymore. If the caller is not within an RCU critical section and
 * does not hold the BQL, it must have other means of protecting the
 * pointer, such as a reference to the memory region that owns the RAMBlock.
 */
RAMBlock *qemu_ram_block_from_host(void *ptr, bool round_offset,
                                   ram_addr_t *offset);
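/*
 * A minimal lookup sketch, assuming RCU primitives from "qemu/rcu.h" and a
 * hypothetical host_ptr previously obtained from guest RAM:
 *
 *    ram_addr_t offset;
 *    RAMBlock *rb;
 *
 *    rcu_read_lock();
 *    rb = qemu_ram_block_from_host(host_ptr, false, &offset);
 *    if (rb) {
 *        ... use rb and offset while still inside the critical section,
 *        or take a stronger reference before leaving it ...
 *    }
 *    rcu_read_unlock();
 */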
ram_addr_t qemu_ram_block_host_offset(RAMBlock *rb, void *host);
void qemu_ram_set_idstr(RAMBlock *block, const char *name, DeviceState *dev);
void qemu_ram_unset_idstr(RAMBlock *block);
const char *qemu_ram_get_idstr(RAMBlock *rb);
void *qemu_ram_get_host_addr(RAMBlock *rb);
ram_addr_t qemu_ram_get_offset(RAMBlock *rb);
ram_addr_t qemu_ram_get_used_length(RAMBlock *rb);
ram_addr_t qemu_ram_get_max_length(RAMBlock *rb);
bool qemu_ram_is_shared(RAMBlock *rb);
bool qemu_ram_is_noreserve(RAMBlock *rb);
bool qemu_ram_is_uf_zeroable(RAMBlock *rb);
void qemu_ram_set_uf_zeroable(RAMBlock *rb);
bool qemu_ram_is_migratable(RAMBlock *rb);
void qemu_ram_set_migratable(RAMBlock *rb);
void qemu_ram_unset_migratable(RAMBlock *rb);
bool qemu_ram_is_named_file(RAMBlock *rb);
int qemu_ram_get_fd(RAMBlock *rb);

size_t qemu_ram_pagesize(RAMBlock *block);
size_t qemu_ram_pagesize_largest(void);

/**
 * cpu_address_space_init:
 * @cpu: CPU to add this address space to
 * @asidx: integer index of this address space
 * @prefix: prefix to be used as name of address space
 * @mr: the root memory region of address space
 *
 * Add the specified address space to the CPU's cpu_ases list.
 * The address space added with @asidx 0 is the one used for the
 * convenience pointer cpu->as.
 * The target-specific code which registers ASes is responsible
 * for defining what semantics address space 0, 1, 2, etc have.
 *
 * Before the first call to this function, the caller must set
 * cpu->num_ases to the total number of address spaces it needs
 * to support.
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_init(CPUState *cpu, int asidx,
                            const char *prefix, MemoryRegion *mr);
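/*
 * A minimal setup sketch for a CPU with a single address space rooted at
 * the system memory region (get_system_memory() is declared in
 * "exec/address-spaces.h"):
 *
 *    cpu->num_ases = 1;
 *    cpu_address_space_init(cpu, 0, "cpu-memory", get_system_memory());
 */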
/**
 * cpu_address_space_destroy:
 * @cpu: CPU for which address space needs to be destroyed
 * @asidx: integer index of this address space
 *
 * Note that with KVM only one address space is supported.
 */
void cpu_address_space_destroy(CPUState *cpu, int asidx);

void cpu_physical_memory_rw(hwaddr addr, void *buf,
                            hwaddr len, bool is_write);
static inline void cpu_physical_memory_read(hwaddr addr,
                                            void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, buf, len, false);
}
static inline void cpu_physical_memory_write(hwaddr addr,
                                             const void *buf, hwaddr len)
{
    cpu_physical_memory_rw(addr, (void *)buf, len, true);
}
void *cpu_physical_memory_map(hwaddr addr,
                              hwaddr *plen,
                              bool is_write);
void cpu_physical_memory_unmap(void *buffer, hwaddr len,
                               bool is_write, hwaddr access_len);
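/*
 * The map/unmap pair follows the usual DMA bounce-buffer contract; a
 * sketch (error handling elided; the mapped length can come back shorter
 * than requested, so always use the returned *plen):
 *
 *    hwaddr plen = len;
 *    void *p = cpu_physical_memory_map(addr, &plen, true);
 *    if (p) {
 *        memset(p, 0, plen);
 *        cpu_physical_memory_unmap(p, plen, true, plen);
 *    }
 */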

bool cpu_physical_memory_is_io(hwaddr phys_addr);

/* Coalesced MMIO regions are areas where write operations can be reordered.
 * This usually implies that write operations are side-effect free. This allows
 * batching, which can have a major impact on performance when using
 * virtualization.
 */
void qemu_flush_coalesced_mmio_buffer(void);

void cpu_flush_icache_range(hwaddr start, hwaddr len);

typedef int (RAMBlockIterFunc)(RAMBlock *rb, void *opaque);

int qemu_ram_foreach_block(RAMBlockIterFunc func, void *opaque);
int ram_block_discard_range(RAMBlock *rb, uint64_t start, size_t length);
int ram_block_discard_guest_memfd_range(RAMBlock *rb, uint64_t start,
                                        size_t length);

#endif

/* Returns: 0 on success, -1 on error */
int cpu_memory_rw_debug(CPUState *cpu, vaddr addr,
                        void *ptr, size_t len, bool is_write);
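/*
 * A gdbstub-style sketch: read a guest word for inspection, using the
 * 0/-1 return convention documented above:
 *
 *    uint32_t word;
 *    if (cpu_memory_rw_debug(cpu, addr, &word, sizeof(word), false) < 0) {
 *        ... the virtual address is not accessible ...
 *    }
 */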

/* vl.c */
void list_cpus(void);

#ifdef CONFIG_TCG

bool tcg_cflags_has(CPUState *cpu, uint32_t flags);
void tcg_cflags_set(CPUState *cpu, uint32_t flags);

/* current cflags for hashing/comparison */
uint32_t curr_cflags(CPUState *cpu);

/**
 * cpu_unwind_state_data:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @data: output data
 *
 * Attempt to load the unwind state for a host pc occurring in
 * translated code. If @host_pc is not in translated code, the
 * function returns false; otherwise @data is loaded.
 * This is the same unwind info as given to restore_state_to_opc.
 */
bool cpu_unwind_state_data(CPUState *cpu, uintptr_t host_pc, uint64_t *data);

/**
 * cpu_restore_state:
 * @cpu: the cpu context
 * @host_pc: the host pc within the translation
 * @return: true if state was restored, false otherwise
 *
 * Attempt to restore the state for a fault occurring in translated
 * code. If @host_pc is not in translated code, no state is
 * restored and the function returns false.
 */
bool cpu_restore_state(CPUState *cpu, uintptr_t host_pc);
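/*
 * Typical fault-path sketch: a helper about to raise a guest exception
 * first re-synchronises guest state from the host return address of the
 * faulting translation:
 *
 *    cpu_restore_state(cs, retaddr);
 *    cpu_loop_exit(cs);
 *
 * which is what cpu_loop_exit_restore() below bundles into a single call.
 */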

G_NORETURN void cpu_loop_exit_noexc(CPUState *cpu);
G_NORETURN void cpu_loop_exit_atomic(CPUState *cpu, uintptr_t pc);
#endif /* CONFIG_TCG */
G_NORETURN void cpu_loop_exit(CPUState *cpu);
G_NORETURN void cpu_loop_exit_restore(CPUState *cpu, uintptr_t pc);

/* accel/tcg/cpu-exec.c */
int cpu_exec(CPUState *cpu);

/**
 * env_archcpu(env)
 * @env: The architecture environment
 *
 * Return the ArchCPU associated with the environment.
 */
static inline ArchCPU *env_archcpu(CPUArchState *env)
{
    return (void *)env - sizeof(CPUState);
}
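/*
 * This pointer arithmetic relies on every ArchCPU being laid out with the
 * CPUState first and the architecture's CPUArchState immediately after it,
 * roughly:
 *
 *    struct ArchCPU {
 *        CPUState parent_obj;
 *        CPUArchState env;
 *        ...
 *    };
 *
 * so stepping back sizeof(CPUState) bytes from @env lands on the
 * containing CPU object.
 */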

/**
 * env_cpu_const(env)
 * @env: The architecture environment
 *
 * Return the CPUState associated with the environment.
 */
static inline const CPUState *env_cpu_const(const CPUArchState *env)
{
    return (void *)env - sizeof(CPUState);
}

/**
 * env_cpu(env)
 * @env: The architecture environment
 *
 * Return the CPUState associated with the environment.
 */
static inline CPUState *env_cpu(CPUArchState *env)
{
    return (CPUState *)env_cpu_const(env);
}
#ifndef CONFIG_USER_ONLY
/**
 * cpu_mmu_index:
 * @cs: The cpu state
 * @ifetch: True for code access, false for data access.
 *
 * Return the core mmu index for the current translation regime.
 * This function is used by generic TCG code paths.
 *
 * The user-only version of this function is inline in cpu-all.h,
 * where it always returns MMU_USER_IDX.
 */
static inline int cpu_mmu_index(CPUState *cs, bool ifetch)
{
    int ret = cs->cc->mmu_index(cs, ifetch);
    tcg_debug_assert(ret >= 0 && ret < NB_MMU_MODES);
    return ret;
}
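/*
 * Usage sketch from a translator's point of view: pick the index matching
 * the access being generated, e.g.
 *
 *    int code_idx = cpu_mmu_index(cs, true);    for a code fetch
 *    int data_idx = cpu_mmu_index(cs, false);   for a data access
 */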
#endif /* !CONFIG_USER_ONLY */

#endif /* CPU_COMMON_H */