bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 1 | /* |
| 2 | * defines common to all virtual CPUs |
ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 3 | * |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 4 | * Copyright (c) 2003 Fabrice Bellard |
| 5 | * |
| 6 | * This library is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU Lesser General Public |
| 8 | * License as published by the Free Software Foundation; either |
| 9 | * version 2 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | * This library is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | * Lesser General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU Lesser General Public |
Blue Swirl | 8167ee8 | 2009-07-16 20:47:01 +0000 | [diff] [blame] | 17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 18 | */ |
| 19 | #ifndef CPU_ALL_H |
| 20 | #define CPU_ALL_H |
| 21 | |
blueswir1 | 7d99a00 | 2009-01-14 19:00:36 +0000 | [diff] [blame] | 22 | #include "qemu-common.h" |
Paolo Bonzini | 022c62c | 2012-12-17 18:19:49 +0100 | [diff] [blame] | 23 | #include "exec/cpu-common.h" |
Juan Quintela | 1ab4c8c | 2013-10-08 16:14:39 +0200 | [diff] [blame] | 24 | #include "exec/memory.h" |
Umesh Deshpande | b2a8658 | 2011-08-17 00:01:33 -0700 | [diff] [blame] | 25 | #include "qemu/thread.h" |
Andreas Färber | f17ec44 | 2013-06-29 19:40:58 +0200 | [diff] [blame] | 26 | #include "qom/cpu.h" |
Paolo Bonzini | 4377153 | 2013-09-09 17:58:40 +0200 | [diff] [blame] | 27 | #include "qemu/rcu.h" |
bellard | 0ac4bd5 | 2004-01-04 15:44:17 +0000 | [diff] [blame] | 28 | |
Peter Crosthwaite | 9e0dc48 | 2015-05-30 23:11:42 -0700 | [diff] [blame] | 29 | #define EXCP_INTERRUPT 0x10000 /* async interruption */ |
| 30 | #define EXCP_HLT 0x10001 /* hlt instruction reached */ |
| 31 | #define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */ |
| 32 | #define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */ |
| 33 | #define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */ |
Richard Henderson | fdbc2b5 | 2016-06-29 22:12:55 -0700 | [diff] [blame] | 34 | #define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */ |
Peter Crosthwaite | 9e0dc48 | 2015-05-30 23:11:42 -0700 | [diff] [blame] | 35 | |
ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 36 | /* some important defines: |
| 37 | * |
Juan Quintela | e2542fe | 2009-07-27 16:13:06 +0200 | [diff] [blame] | 38 | * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian; |
bellard | 0ac4bd5 | 2004-01-04 15:44:17 +0000 | [diff] [blame] | 39 | * otherwise it is little endian. |
ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 40 | * |
bellard | 0ac4bd5 | 2004-01-04 15:44:17 +0000 | [diff] [blame] | 41 | * TARGET_WORDS_BIGENDIAN : same for target cpu |
| 42 | */ |
| 43 | |
Juan Quintela | e2542fe | 2009-07-27 16:13:06 +0200 | [diff] [blame] | 44 | #if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN) |
bellard | f193c79 | 2004-03-21 17:06:25 +0000 | [diff] [blame] | 45 | #define BSWAP_NEEDED |
| 46 | #endif |
| 47 | |
| 48 | #ifdef BSWAP_NEEDED |
| 49 | |
static inline uint16_t tswap16(uint16_t s)
{
    /* Host and target endianness differ: byte-swap the 16-bit value. */
    uint16_t swapped = bswap16(s);
    return swapped;
}
| 54 | |
static inline uint32_t tswap32(uint32_t s)
{
    /* Host and target endianness differ: byte-swap the 32-bit value. */
    uint32_t swapped = bswap32(s);
    return swapped;
}
| 59 | |
static inline uint64_t tswap64(uint64_t s)
{
    /* Host and target endianness differ: byte-swap the 64-bit value. */
    uint64_t swapped = bswap64(s);
    return swapped;
}
| 64 | |
static inline void tswap16s(uint16_t *s)
{
    /* In-place variant: byte-swap the 16-bit value pointed to by s. */
    uint16_t v = *s;
    *s = bswap16(v);
}
| 69 | |
static inline void tswap32s(uint32_t *s)
{
    /* In-place variant: byte-swap the 32-bit value pointed to by s. */
    uint32_t v = *s;
    *s = bswap32(v);
}
| 74 | |
static inline void tswap64s(uint64_t *s)
{
    /* In-place variant: byte-swap the 64-bit value pointed to by s. */
    uint64_t v = *s;
    *s = bswap64(v);
}
| 79 | |
| 80 | #else |
| 81 | |
static inline uint16_t tswap16(uint16_t s)
{
    /* Host and target byte order match: the value passes through unchanged. */
    uint16_t unchanged = s;
    return unchanged;
}
| 86 | |
static inline uint32_t tswap32(uint32_t s)
{
    /* Host and target byte order match: the value passes through unchanged. */
    uint32_t unchanged = s;
    return unchanged;
}
| 91 | |
static inline uint64_t tswap64(uint64_t s)
{
    /* Host and target byte order match: the value passes through unchanged. */
    uint64_t unchanged = s;
    return unchanged;
}
| 96 | |
static inline void tswap16s(uint16_t *s)
{
    /* No byte order difference between host and target: nothing to do. */
    (void)s;
}
| 100 | |
static inline void tswap32s(uint32_t *s)
{
    /* No byte order difference between host and target: nothing to do. */
    (void)s;
}
| 104 | |
static inline void tswap64s(uint64_t *s)
{
    /* No byte order difference between host and target: nothing to do. */
    (void)s;
}
| 108 | |
| 109 | #endif |
| 110 | |
| 111 | #if TARGET_LONG_SIZE == 4 |
| 112 | #define tswapl(s) tswap32(s) |
| 113 | #define tswapls(s) tswap32s((uint32_t *)(s)) |
bellard | 0a962c0 | 2005-02-10 22:00:27 +0000 | [diff] [blame] | 114 | #define bswaptls(s) bswap32s(s) |
bellard | f193c79 | 2004-03-21 17:06:25 +0000 | [diff] [blame] | 115 | #else |
| 116 | #define tswapl(s) tswap64(s) |
| 117 | #define tswapls(s) tswap64s((uint64_t *)(s)) |
bellard | 0a962c0 | 2005-02-10 22:00:27 +0000 | [diff] [blame] | 118 | #define bswaptls(s) bswap64s(s) |
bellard | f193c79 | 2004-03-21 17:06:25 +0000 | [diff] [blame] | 119 | #endif |
| 120 | |
Peter Maydell | db5fd8d | 2015-01-20 15:19:35 +0000 | [diff] [blame] | 121 | /* Target-endianness CPU memory access functions. These fit into the |
| 122 | * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h. |
bellard | 83d7396 | 2004-02-22 11:53:50 +0000 | [diff] [blame] | 123 | */ |
bellard | 2df3b95 | 2005-11-19 17:47:39 +0000 | [diff] [blame] | 124 | #if defined(TARGET_WORDS_BIGENDIAN) |
| 125 | #define lduw_p(p) lduw_be_p(p) |
| 126 | #define ldsw_p(p) ldsw_be_p(p) |
| 127 | #define ldl_p(p) ldl_be_p(p) |
| 128 | #define ldq_p(p) ldq_be_p(p) |
| 129 | #define ldfl_p(p) ldfl_be_p(p) |
| 130 | #define ldfq_p(p) ldfq_be_p(p) |
| 131 | #define stw_p(p, v) stw_be_p(p, v) |
| 132 | #define stl_p(p, v) stl_be_p(p, v) |
| 133 | #define stq_p(p, v) stq_be_p(p, v) |
| 134 | #define stfl_p(p, v) stfl_be_p(p, v) |
| 135 | #define stfq_p(p, v) stfq_be_p(p, v) |
| 136 | #else |
| 137 | #define lduw_p(p) lduw_le_p(p) |
| 138 | #define ldsw_p(p) ldsw_le_p(p) |
| 139 | #define ldl_p(p) ldl_le_p(p) |
| 140 | #define ldq_p(p) ldq_le_p(p) |
| 141 | #define ldfl_p(p) ldfl_le_p(p) |
| 142 | #define ldfq_p(p) ldfq_le_p(p) |
| 143 | #define stw_p(p, v) stw_le_p(p, v) |
| 144 | #define stl_p(p, v) stl_le_p(p, v) |
| 145 | #define stq_p(p, v) stq_le_p(p, v) |
| 146 | #define stfl_p(p, v) stfl_le_p(p, v) |
| 147 | #define stfq_p(p, v) stfq_le_p(p, v) |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 148 | #endif |
| 149 | |
bellard | 61382a5 | 2003-10-27 21:22:23 +0000 | [diff] [blame] | 150 | /* MMU memory access macros */ |
| 151 | |
pbrook | 53a5960 | 2006-03-25 19:31:22 +0000 | [diff] [blame] | 152 | #if defined(CONFIG_USER_ONLY) |
Paolo Bonzini | 022c62c | 2012-12-17 18:19:49 +0100 | [diff] [blame] | 153 | #include "exec/user/abitypes.h" |
aurel32 | 0e62fd7 | 2008-12-08 18:12:11 +0000 | [diff] [blame] | 154 | |
pbrook | 53a5960 | 2006-03-25 19:31:22 +0000 | [diff] [blame] | 155 | /* On some host systems the guest address space is reserved on the host. |
| 156 | * This allows the guest address space to be offset to a convenient location. |
| 157 | */ |
Paul Brook | 379f669 | 2009-07-17 12:48:08 +0100 | [diff] [blame] | 158 | extern unsigned long guest_base; |
| 159 | extern int have_guest_base; |
Paul Brook | 68a1c81 | 2010-05-29 02:27:35 +0100 | [diff] [blame] | 160 | extern unsigned long reserved_va; |
pbrook | 53a5960 | 2006-03-25 19:31:22 +0000 | [diff] [blame] | 161 | |
Laurent Vivier | b76f21a | 2015-08-24 14:53:54 +0200 | [diff] [blame] | 162 | #define GUEST_ADDR_MAX (reserved_va ? reserved_va : \ |
Mikhail Ilyin | d67f4aa | 2014-08-05 17:33:51 +0400 | [diff] [blame] | 163 | (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1) |
Paolo Bonzini | a7d6039 | 2014-06-27 08:33:38 +0200 | [diff] [blame] | 164 | #else |
| 165 | |
| 166 | #include "exec/hwaddr.h" |
| 167 | uint32_t lduw_phys(AddressSpace *as, hwaddr addr); |
| 168 | uint32_t ldl_phys(AddressSpace *as, hwaddr addr); |
| 169 | uint64_t ldq_phys(AddressSpace *as, hwaddr addr); |
| 170 | void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val); |
| 171 | void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val); |
| 172 | void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val); |
| 173 | void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val); |
| 174 | |
| 175 | uint32_t address_space_lduw(AddressSpace *as, hwaddr addr, |
| 176 | MemTxAttrs attrs, MemTxResult *result); |
| 177 | uint32_t address_space_ldl(AddressSpace *as, hwaddr addr, |
| 178 | MemTxAttrs attrs, MemTxResult *result); |
| 179 | uint64_t address_space_ldq(AddressSpace *as, hwaddr addr, |
| 180 | MemTxAttrs attrs, MemTxResult *result); |
| 181 | void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val, |
| 182 | MemTxAttrs attrs, MemTxResult *result); |
| 183 | void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val, |
| 184 | MemTxAttrs attrs, MemTxResult *result); |
| 185 | void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val, |
| 186 | MemTxAttrs attrs, MemTxResult *result); |
| 187 | void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val, |
| 188 | MemTxAttrs attrs, MemTxResult *result); |
Paolo Bonzini | 1f4e496 | 2016-11-22 12:04:52 +0100 | [diff] [blame] | 189 | |
| 190 | uint32_t lduw_phys_cached(MemoryRegionCache *cache, hwaddr addr); |
| 191 | uint32_t ldl_phys_cached(MemoryRegionCache *cache, hwaddr addr); |
| 192 | uint64_t ldq_phys_cached(MemoryRegionCache *cache, hwaddr addr); |
| 193 | void stl_phys_notdirty_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); |
| 194 | void stw_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); |
| 195 | void stl_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val); |
| 196 | void stq_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val); |
| 197 | |
| 198 | uint32_t address_space_lduw_cached(MemoryRegionCache *cache, hwaddr addr, |
| 199 | MemTxAttrs attrs, MemTxResult *result); |
| 200 | uint32_t address_space_ldl_cached(MemoryRegionCache *cache, hwaddr addr, |
| 201 | MemTxAttrs attrs, MemTxResult *result); |
| 202 | uint64_t address_space_ldq_cached(MemoryRegionCache *cache, hwaddr addr, |
| 203 | MemTxAttrs attrs, MemTxResult *result); |
| 204 | void address_space_stl_notdirty_cached(MemoryRegionCache *cache, hwaddr addr, |
| 205 | uint32_t val, MemTxAttrs attrs, MemTxResult *result); |
| 206 | void address_space_stw_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val, |
| 207 | MemTxAttrs attrs, MemTxResult *result); |
| 208 | void address_space_stl_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val, |
| 209 | MemTxAttrs attrs, MemTxResult *result); |
| 210 | void address_space_stq_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val, |
| 211 | MemTxAttrs attrs, MemTxResult *result); |
Richard Henderson | b9f8312 | 2010-03-10 14:36:58 -0800 | [diff] [blame] | 212 | #endif |
| 213 | |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 214 | /* page related stuff */ |
| 215 | |
Peter Maydell | 20bccb8 | 2016-10-24 16:26:49 +0100 | [diff] [blame] | 216 | #ifdef TARGET_PAGE_BITS_VARY |
| 217 | extern bool target_page_bits_decided; |
| 218 | extern int target_page_bits; |
| 219 | #define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \ |
| 220 | target_page_bits; }) |
| 221 | #else |
| 222 | #define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS |
| 223 | #endif |
| 224 | |
aurel32 | 0387544 | 2008-04-22 20:45:18 +0000 | [diff] [blame] | 225 | #define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS) |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 226 | #define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1) |
| 227 | #define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK) |
| 228 | |
Paolo Bonzini | 0c2d70c | 2015-12-02 13:00:54 +0100 | [diff] [blame] | 229 | /* Using intptr_t ensures that qemu_*_page_mask is sign-extended even |
| 230 | * when intptr_t is 32-bit and we are aligning a long long. |
| 231 | */ |
Stefan Weil | c6d5067 | 2012-03-16 20:23:49 +0100 | [diff] [blame] | 232 | extern uintptr_t qemu_host_page_size; |
Paolo Bonzini | 0c2d70c | 2015-12-02 13:00:54 +0100 | [diff] [blame] | 233 | extern intptr_t qemu_host_page_mask; |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 234 | |
bellard | 83fb7ad | 2004-07-05 21:25:26 +0000 | [diff] [blame] | 235 | #define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask) |
Peter Crosthwaite | 4e51361 | 2015-07-06 12:15:12 -0600 | [diff] [blame] | 236 | #define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \ |
| 237 | qemu_real_host_page_mask) |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 238 | |
| 239 | /* same as PROT_xxx */ |
| 240 | #define PAGE_READ 0x0001 |
| 241 | #define PAGE_WRITE 0x0002 |
| 242 | #define PAGE_EXEC 0x0004 |
| 243 | #define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC) |
| 244 | #define PAGE_VALID 0x0008 |
| 245 | /* original state of the write flag (used when tracking self-modifying |
| 246 | code) */ |
ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 247 | #define PAGE_WRITE_ORG 0x0010 |
David Hildenbrand | f52bfb1 | 2017-10-16 22:23:57 +0200 | [diff] [blame] | 248 | /* Invalidate the TLB entry immediately, helpful for s390x |
| 249 | * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */ |
| 250 | #define PAGE_WRITE_INV 0x0040 |
Paul Brook | 2e9a571 | 2010-05-05 16:32:59 +0100 | [diff] [blame] | 251 | #if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY) |
| 252 | /* FIXME: Code that sets/uses this is broken and needs to go away. */ |
balrog | 50a9569 | 2007-12-12 01:16:23 +0000 | [diff] [blame] | 253 | #define PAGE_RESERVED 0x0020 |
Paul Brook | 2e9a571 | 2010-05-05 16:32:59 +0100 | [diff] [blame] | 254 | #endif |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 255 | |
Paul Brook | b480d9b | 2010-03-12 23:23:29 +0000 | [diff] [blame] | 256 | #if defined(CONFIG_USER_ONLY) |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 257 | void page_dump(FILE *f); |
Richard Henderson | 5cd2c5b | 2010-03-10 15:53:37 -0800 | [diff] [blame] | 258 | |
Mikhail Ilyin | 1a1c4db | 2014-09-08 17:28:56 +0400 | [diff] [blame] | 259 | typedef int (*walk_memory_regions_fn)(void *, target_ulong, |
| 260 | target_ulong, unsigned long); |
Richard Henderson | 5cd2c5b | 2010-03-10 15:53:37 -0800 | [diff] [blame] | 261 | int walk_memory_regions(void *, walk_memory_regions_fn); |
| 262 | |
pbrook | 53a5960 | 2006-03-25 19:31:22 +0000 | [diff] [blame] | 263 | int page_get_flags(target_ulong address); |
| 264 | void page_set_flags(target_ulong start, target_ulong end, int flags); |
ths | 3d97b40 | 2007-11-02 19:02:07 +0000 | [diff] [blame] | 265 | int page_check_range(target_ulong start, target_ulong len, int flags); |
Paul Brook | b480d9b | 2010-03-12 23:23:29 +0000 | [diff] [blame] | 266 | #endif |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 267 | |
Andreas Färber | 9349b4f | 2012-03-14 01:38:32 +0100 | [diff] [blame] | 268 | CPUArchState *cpu_copy(CPUArchState *env); |
ths | c5be9f0 | 2007-02-28 20:20:53 +0000 | [diff] [blame] | 269 | |
Richard Henderson | 9c76219 | 2011-05-04 13:34:24 -0700 | [diff] [blame] | 270 | /* Flags for use in ENV->INTERRUPT_PENDING. |
| 271 | |
| 272 | The numbers assigned here are non-sequential in order to preserve |
| 273 | binary compatibility with the vmstate dump. Bit 0 (0x0001) was |
| 274 | previously used for CPU_INTERRUPT_EXIT, and is cleared when loading |
| 275 | the vmstate dump. */ |
| 276 | |
| 277 | /* External hardware interrupt pending. This is typically used for |
| 278 | interrupts from devices. */ |
| 279 | #define CPU_INTERRUPT_HARD 0x0002 |
| 280 | |
| 281 | /* Exit the current TB. This is typically used when some system-level device |
| 282 | makes some change to the memory mapping. E.g. the a20 line change. */ |
| 283 | #define CPU_INTERRUPT_EXITTB 0x0004 |
| 284 | |
| 285 | /* Halt the CPU. */ |
| 286 | #define CPU_INTERRUPT_HALT 0x0020 |
| 287 | |
| 288 | /* Debug event pending. */ |
| 289 | #define CPU_INTERRUPT_DEBUG 0x0080 |
| 290 | |
Paolo Bonzini | 4a92a55 | 2013-03-05 15:35:17 +0100 | [diff] [blame] | 291 | /* Reset signal. */ |
| 292 | #define CPU_INTERRUPT_RESET 0x0400 |
| 293 | |
Richard Henderson | 9c76219 | 2011-05-04 13:34:24 -0700 | [diff] [blame] | 294 | /* Several target-specific external hardware interrupts. Each target/cpu.h |
| 295 | should define proper names based on these defines. */ |
| 296 | #define CPU_INTERRUPT_TGT_EXT_0 0x0008 |
| 297 | #define CPU_INTERRUPT_TGT_EXT_1 0x0010 |
| 298 | #define CPU_INTERRUPT_TGT_EXT_2 0x0040 |
| 299 | #define CPU_INTERRUPT_TGT_EXT_3 0x0200 |
| 300 | #define CPU_INTERRUPT_TGT_EXT_4 0x1000 |
| 301 | |
| 302 | /* Several target-specific internal interrupts. These differ from the |
Dong Xu Wang | 07f3507 | 2011-11-22 18:06:26 +0800 | [diff] [blame] | 303 | preceding target-specific interrupts in that they are intended to |
Richard Henderson | 9c76219 | 2011-05-04 13:34:24 -0700 | [diff] [blame] | 304 | originate from within the cpu itself, typically in response to some |
| 305 | instruction being executed. These, therefore, are not masked while |
| 306 | single-stepping within the debugger. */ |
| 307 | #define CPU_INTERRUPT_TGT_INT_0 0x0100 |
Paolo Bonzini | 4a92a55 | 2013-03-05 15:35:17 +0100 | [diff] [blame] | 308 | #define CPU_INTERRUPT_TGT_INT_1 0x0800 |
| 309 | #define CPU_INTERRUPT_TGT_INT_2 0x2000 |
Richard Henderson | 9c76219 | 2011-05-04 13:34:24 -0700 | [diff] [blame] | 310 | |
Jan Kiszka | d362e75 | 2012-02-17 18:31:17 +0100 | [diff] [blame] | 311 | /* First unused bit: 0x4000. */ |
Richard Henderson | 9c76219 | 2011-05-04 13:34:24 -0700 | [diff] [blame] | 312 | |
Richard Henderson | 3125f76 | 2011-05-04 13:34:25 -0700 | [diff] [blame] | 313 | /* The set of all bits that should be masked when single-stepping. */ |
| 314 | #define CPU_INTERRUPT_SSTEP_MASK \ |
| 315 | (CPU_INTERRUPT_HARD \ |
| 316 | | CPU_INTERRUPT_TGT_EXT_0 \ |
| 317 | | CPU_INTERRUPT_TGT_EXT_1 \ |
| 318 | | CPU_INTERRUPT_TGT_EXT_2 \ |
| 319 | | CPU_INTERRUPT_TGT_EXT_3 \ |
| 320 | | CPU_INTERRUPT_TGT_EXT_4) |
bellard | 9869996 | 2005-11-26 10:29:22 +0000 | [diff] [blame] | 321 | |
Paul Brook | b3755a9 | 2010-03-12 16:54:58 +0000 | [diff] [blame] | 322 | #if !defined(CONFIG_USER_ONLY) |
| 323 | |
pbrook | 0f459d1 | 2008-06-09 00:20:13 +0000 | [diff] [blame] | 324 | /* Flags stored in the low bits of the TLB virtual address. These are |
Sergey Sorokin | 1f00b27 | 2016-06-23 21:16:46 +0300 | [diff] [blame] | 325 | * defined so that fast path ram access is all zeros. |
| 326 | * The flags all must be between TARGET_PAGE_BITS and |
| 327 | * maximum address alignment bit. |
| 328 | */ |
pbrook | 0f459d1 | 2008-06-09 00:20:13 +0000 | [diff] [blame] | 329 | /* Zero if TLB entry is valid. */ |
Sergey Sorokin | 1f00b27 | 2016-06-23 21:16:46 +0300 | [diff] [blame] | 330 | #define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1)) |
pbrook | 0f459d1 | 2008-06-09 00:20:13 +0000 | [diff] [blame] | 331 | /* Set if TLB entry references a clean RAM page. The iotlb entry will |
| 332 | contain the page physical address. */ |
Sergey Sorokin | 1f00b27 | 2016-06-23 21:16:46 +0300 | [diff] [blame] | 333 | #define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2)) |
pbrook | 0f459d1 | 2008-06-09 00:20:13 +0000 | [diff] [blame] | 334 | /* Set if TLB entry is an IO callback. */ |
Sergey Sorokin | 1f00b27 | 2016-06-23 21:16:46 +0300 | [diff] [blame] | 335 | #define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3)) |
| 336 | |
| 337 | /* Use this mask to check interception with an alignment mask |
| 338 | * in a TCG backend. |
| 339 | */ |
| 340 | #define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO) |
pbrook | 0f459d1 | 2008-06-09 00:20:13 +0000 | [diff] [blame] | 341 | |
Stefan Weil | 055403b | 2010-10-22 23:03:32 +0200 | [diff] [blame] | 342 | void dump_exec_info(FILE *f, fprintf_function cpu_fprintf); |
Max Filippov | 246ae24 | 2014-11-02 11:04:18 +0300 | [diff] [blame] | 343 | void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf); |
Paul Brook | b3755a9 | 2010-03-12 16:54:58 +0000 | [diff] [blame] | 344 | #endif /* !CONFIG_USER_ONLY */ |
| 345 | |
Andreas Färber | f17ec44 | 2013-06-29 19:40:58 +0200 | [diff] [blame] | 346 | int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr, |
Paul Brook | b3755a9 | 2010-03-12 16:54:58 +0000 | [diff] [blame] | 347 | uint8_t *buf, int len, int is_write); |
| 348 | |
Peter Crosthwaite | 8642c1b | 2015-07-18 02:40:28 -0700 | [diff] [blame] | 349 | int cpu_exec(CPUState *cpu); |
| 350 | |
bellard | 5a9fdfe | 2003-06-15 20:02:25 +0000 | [diff] [blame] | 351 | #endif /* CPU_ALL_H */ |