blob: 0b141683f095aaf7f89e4ec6fc80e8398758c043 [file] [log] [blame]
bellard5a9fdfe2003-06-15 20:02:25 +00001/*
2 * defines common to all virtual CPUs
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard5a9fdfe2003-06-15 20:02:25 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellard5a9fdfe2003-06-15 20:02:25 +000018 */
19#ifndef CPU_ALL_H
20#define CPU_ALL_H
21
blueswir17d99a002009-01-14 19:00:36 +000022#include "qemu-common.h"
Paolo Bonzini022c62c2012-12-17 18:19:49 +010023#include "exec/cpu-common.h"
Juan Quintela1ab4c8c2013-10-08 16:14:39 +020024#include "exec/memory.h"
Umesh Deshpandeb2a86582011-08-17 00:01:33 -070025#include "qemu/thread.h"
Andreas Färberf17ec442013-06-29 19:40:58 +020026#include "qom/cpu.h"
Paolo Bonzini43771532013-09-09 17:58:40 +020027#include "qemu/rcu.h"
bellard0ac4bd52004-01-04 15:44:17 +000028
Peter Crosthwaite9e0dc482015-05-30 23:11:42 -070029#define EXCP_INTERRUPT 0x10000 /* async interruption */
30#define EXCP_HLT 0x10001 /* hlt instruction reached */
31#define EXCP_DEBUG 0x10002 /* cpu stopped after a breakpoint or singlestep */
32#define EXCP_HALTED 0x10003 /* cpu is halted (waiting for external event) */
33#define EXCP_YIELD 0x10004 /* cpu wants to yield timeslice to another */
Richard Hendersonfdbc2b52016-06-29 22:12:55 -070034#define EXCP_ATOMIC 0x10005 /* stop-the-world and emulate atomic */
Peter Crosthwaite9e0dc482015-05-30 23:11:42 -070035
ths5fafdf22007-09-16 21:08:06 +000036/* some important defines:
37 *
Juan Quintelae2542fe2009-07-27 16:13:06 +020038 * HOST_WORDS_BIGENDIAN : if defined, the host cpu is big endian and
bellard0ac4bd52004-01-04 15:44:17 +000039 * otherwise little endian.
ths5fafdf22007-09-16 21:08:06 +000040 *
bellard0ac4bd52004-01-04 15:44:17 +000041 * TARGET_WORDS_BIGENDIAN : same for target cpu
42 */
43
Juan Quintelae2542fe2009-07-27 16:13:06 +020044#if defined(HOST_WORDS_BIGENDIAN) != defined(TARGET_WORDS_BIGENDIAN)
bellardf193c792004-03-21 17:06:25 +000045#define BSWAP_NEEDED
46#endif
47
48#ifdef BSWAP_NEEDED
49
/* Swap a 16-bit value between host and target byte order.
 * This branch is compiled only when BSWAP_NEEDED is defined, i.e. when
 * exactly one of HOST_WORDS_BIGENDIAN / TARGET_WORDS_BIGENDIAN is set. */
static inline uint16_t tswap16(uint16_t s)
{
    return bswap16(s);
}
54
/* Swap a 32-bit value between host and target byte order
 * (host and target endianness differ in this #ifdef branch). */
static inline uint32_t tswap32(uint32_t s)
{
    return bswap32(s);
}
59
/* Swap a 64-bit value between host and target byte order
 * (host and target endianness differ in this #ifdef branch). */
static inline uint64_t tswap64(uint64_t s)
{
    return bswap64(s);
}
64
/* Byte-swap the 16-bit value at *s in place (host <-> target order). */
static inline void tswap16s(uint16_t *s)
{
    *s = bswap16(*s);
}
69
/* Byte-swap the 32-bit value at *s in place (host <-> target order). */
static inline void tswap32s(uint32_t *s)
{
    *s = bswap32(*s);
}
74
/* Byte-swap the 64-bit value at *s in place (host <-> target order). */
static inline void tswap64s(uint64_t *s)
{
    *s = bswap64(*s);
}
79
80#else
81
/* Host and target share the same byte order in this branch, so target
 * swapping is the identity: the 16-bit value passes through unchanged. */
static inline uint16_t tswap16(uint16_t s)
{
    uint16_t host_order = s;
    return host_order;
}
86
/* Identity conversion: host and target endianness match, so the
 * 32-bit value needs no byte swapping. */
static inline uint32_t tswap32(uint32_t s)
{
    uint32_t host_order = s;
    return host_order;
}
91
/* Identity conversion: host and target endianness match, so the
 * 64-bit value needs no byte swapping. */
static inline uint64_t tswap64(uint64_t s)
{
    uint64_t host_order = s;
    return host_order;
}
96
/* In-place variant: nothing to do when host and target share byte order.
 * The cast to void documents that the parameter is intentionally unused. */
static inline void tswap16s(uint16_t *s)
{
    (void)s;
}
100
/* In-place variant: no-op, since host and target endianness match. */
static inline void tswap32s(uint32_t *s)
{
    (void)s;
}
104
/* In-place variant: no-op, since host and target endianness match. */
static inline void tswap64s(uint64_t *s)
{
    (void)s;
}
108
109#endif
110
/* target_long-width swap helpers: select the 32-bit or 64-bit tswap
 * variants to match TARGET_LONG_SIZE.  bswaptls() always swaps (it is
 * used on data known to be in the opposite byte order), while tswapl()
 * and tswapls() follow the host/target endianness selection above. */
#if TARGET_LONG_SIZE == 4
#define tswapl(s) tswap32(s)
#define tswapls(s) tswap32s((uint32_t *)(s))
#define bswaptls(s) bswap32s(s)
#else
#define tswapl(s) tswap64(s)
#define tswapls(s) tswap64s((uint64_t *)(s))
#define bswaptls(s) bswap64s(s)
#endif
120
Peter Maydelldb5fd8d2015-01-20 15:19:35 +0000121/* Target-endianness CPU memory access functions. These fit into the
122 * {ld,st}{type}{sign}{size}{endian}_p naming scheme described in bswap.h.
bellard83d73962004-02-22 11:53:50 +0000123 */
bellard2df3b952005-11-19 17:47:39 +0000124#if defined(TARGET_WORDS_BIGENDIAN)
125#define lduw_p(p) lduw_be_p(p)
126#define ldsw_p(p) ldsw_be_p(p)
127#define ldl_p(p) ldl_be_p(p)
128#define ldq_p(p) ldq_be_p(p)
129#define ldfl_p(p) ldfl_be_p(p)
130#define ldfq_p(p) ldfq_be_p(p)
131#define stw_p(p, v) stw_be_p(p, v)
132#define stl_p(p, v) stl_be_p(p, v)
133#define stq_p(p, v) stq_be_p(p, v)
134#define stfl_p(p, v) stfl_be_p(p, v)
135#define stfq_p(p, v) stfq_be_p(p, v)
136#else
137#define lduw_p(p) lduw_le_p(p)
138#define ldsw_p(p) ldsw_le_p(p)
139#define ldl_p(p) ldl_le_p(p)
140#define ldq_p(p) ldq_le_p(p)
141#define ldfl_p(p) ldfl_le_p(p)
142#define ldfq_p(p) ldfq_le_p(p)
143#define stw_p(p, v) stw_le_p(p, v)
144#define stl_p(p, v) stl_le_p(p, v)
145#define stq_p(p, v) stq_le_p(p, v)
146#define stfl_p(p, v) stfl_le_p(p, v)
147#define stfq_p(p, v) stfq_le_p(p, v)
bellard5a9fdfe2003-06-15 20:02:25 +0000148#endif
149
bellard61382a52003-10-27 21:22:23 +0000150/* MMU memory access macros */
151
pbrook53a59602006-03-25 19:31:22 +0000152#if defined(CONFIG_USER_ONLY)
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100153#include "exec/user/abitypes.h"
aurel320e62fd72008-12-08 18:12:11 +0000154
pbrook53a59602006-03-25 19:31:22 +0000155/* On some host systems the guest address space is reserved on the host.
156 * This allows the guest address space to be offset to a convenient location.
157 */
Paul Brook379f6692009-07-17 12:48:08 +0100158extern unsigned long guest_base;
159extern int have_guest_base;
Paul Brook68a1c812010-05-29 02:27:35 +0100160extern unsigned long reserved_va;
pbrook53a59602006-03-25 19:31:22 +0000161
Laurent Vivierb76f21a2015-08-24 14:53:54 +0200162#define GUEST_ADDR_MAX (reserved_va ? reserved_va : \
Mikhail Ilyind67f4aa2014-08-05 17:33:51 +0400163 (1ul << TARGET_VIRT_ADDR_SPACE_BITS) - 1)
Paolo Bonzinia7d60392014-06-27 08:33:38 +0200164#else
165
166#include "exec/hwaddr.h"
167uint32_t lduw_phys(AddressSpace *as, hwaddr addr);
168uint32_t ldl_phys(AddressSpace *as, hwaddr addr);
169uint64_t ldq_phys(AddressSpace *as, hwaddr addr);
170void stl_phys_notdirty(AddressSpace *as, hwaddr addr, uint32_t val);
171void stw_phys(AddressSpace *as, hwaddr addr, uint32_t val);
172void stl_phys(AddressSpace *as, hwaddr addr, uint32_t val);
173void stq_phys(AddressSpace *as, hwaddr addr, uint64_t val);
174
175uint32_t address_space_lduw(AddressSpace *as, hwaddr addr,
176 MemTxAttrs attrs, MemTxResult *result);
177uint32_t address_space_ldl(AddressSpace *as, hwaddr addr,
178 MemTxAttrs attrs, MemTxResult *result);
179uint64_t address_space_ldq(AddressSpace *as, hwaddr addr,
180 MemTxAttrs attrs, MemTxResult *result);
181void address_space_stl_notdirty(AddressSpace *as, hwaddr addr, uint32_t val,
182 MemTxAttrs attrs, MemTxResult *result);
183void address_space_stw(AddressSpace *as, hwaddr addr, uint32_t val,
184 MemTxAttrs attrs, MemTxResult *result);
185void address_space_stl(AddressSpace *as, hwaddr addr, uint32_t val,
186 MemTxAttrs attrs, MemTxResult *result);
187void address_space_stq(AddressSpace *as, hwaddr addr, uint64_t val,
188 MemTxAttrs attrs, MemTxResult *result);
Paolo Bonzini1f4e4962016-11-22 12:04:52 +0100189
190uint32_t lduw_phys_cached(MemoryRegionCache *cache, hwaddr addr);
191uint32_t ldl_phys_cached(MemoryRegionCache *cache, hwaddr addr);
192uint64_t ldq_phys_cached(MemoryRegionCache *cache, hwaddr addr);
193void stl_phys_notdirty_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
194void stw_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
195void stl_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val);
196void stq_phys_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val);
197
198uint32_t address_space_lduw_cached(MemoryRegionCache *cache, hwaddr addr,
199 MemTxAttrs attrs, MemTxResult *result);
200uint32_t address_space_ldl_cached(MemoryRegionCache *cache, hwaddr addr,
201 MemTxAttrs attrs, MemTxResult *result);
202uint64_t address_space_ldq_cached(MemoryRegionCache *cache, hwaddr addr,
203 MemTxAttrs attrs, MemTxResult *result);
204void address_space_stl_notdirty_cached(MemoryRegionCache *cache, hwaddr addr,
205 uint32_t val, MemTxAttrs attrs, MemTxResult *result);
206void address_space_stw_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
207 MemTxAttrs attrs, MemTxResult *result);
208void address_space_stl_cached(MemoryRegionCache *cache, hwaddr addr, uint32_t val,
209 MemTxAttrs attrs, MemTxResult *result);
210void address_space_stq_cached(MemoryRegionCache *cache, hwaddr addr, uint64_t val,
211 MemTxAttrs attrs, MemTxResult *result);
Richard Hendersonb9f83122010-03-10 14:36:58 -0800212#endif
213
bellard5a9fdfe2003-06-15 20:02:25 +0000214/* page related stuff */
215
/* When the target's page size is only known at runtime
 * (TARGET_PAGE_BITS_VARY), TARGET_PAGE_BITS expands to a GNU statement
 * expression that reads the runtime value and asserts it has already
 * been finalized (target_page_bits_decided).  Otherwise TARGET_PAGE_BITS
 * is a compile-time constant and is also the minimum. */
#ifdef TARGET_PAGE_BITS_VARY
extern bool target_page_bits_decided;
extern int target_page_bits;
#define TARGET_PAGE_BITS ({ assert(target_page_bits_decided); \
                            target_page_bits; })
#else
#define TARGET_PAGE_BITS_MIN TARGET_PAGE_BITS
#endif
224
aurel3203875442008-04-22 20:45:18 +0000225#define TARGET_PAGE_SIZE (1 << TARGET_PAGE_BITS)
bellard5a9fdfe2003-06-15 20:02:25 +0000226#define TARGET_PAGE_MASK ~(TARGET_PAGE_SIZE - 1)
227#define TARGET_PAGE_ALIGN(addr) (((addr) + TARGET_PAGE_SIZE - 1) & TARGET_PAGE_MASK)
228
Paolo Bonzini0c2d70c2015-12-02 13:00:54 +0100229/* Using intptr_t ensures that qemu_*_page_mask is sign-extended even
230 * when intptr_t is 32-bit and we are aligning a long long.
231 */
Stefan Weilc6d50672012-03-16 20:23:49 +0100232extern uintptr_t qemu_host_page_size;
Paolo Bonzini0c2d70c2015-12-02 13:00:54 +0100233extern intptr_t qemu_host_page_mask;
bellard5a9fdfe2003-06-15 20:02:25 +0000234
bellard83fb7ad2004-07-05 21:25:26 +0000235#define HOST_PAGE_ALIGN(addr) (((addr) + qemu_host_page_size - 1) & qemu_host_page_mask)
Peter Crosthwaite4e513612015-07-06 12:15:12 -0600236#define REAL_HOST_PAGE_ALIGN(addr) (((addr) + qemu_real_host_page_size - 1) & \
237 qemu_real_host_page_mask)
bellard5a9fdfe2003-06-15 20:02:25 +0000238
239/* same as PROT_xxx */
240#define PAGE_READ 0x0001
241#define PAGE_WRITE 0x0002
242#define PAGE_EXEC 0x0004
243#define PAGE_BITS (PAGE_READ | PAGE_WRITE | PAGE_EXEC)
244#define PAGE_VALID 0x0008
245/* original state of the write flag (used when tracking self-modifying
246 code */
ths5fafdf22007-09-16 21:08:06 +0000247#define PAGE_WRITE_ORG 0x0010
David Hildenbrandf52bfb12017-10-16 22:23:57 +0200248/* Invalidate the TLB entry immediately, helpful for s390x
249 * Low-Address-Protection. Used with PAGE_WRITE in tlb_set_page_with_attrs() */
250#define PAGE_WRITE_INV 0x0040
Paul Brook2e9a5712010-05-05 16:32:59 +0100251#if defined(CONFIG_BSD) && defined(CONFIG_USER_ONLY)
252/* FIXME: Code that sets/uses this is broken and needs to go away. */
balrog50a95692007-12-12 01:16:23 +0000253#define PAGE_RESERVED 0x0020
Paul Brook2e9a5712010-05-05 16:32:59 +0100254#endif
bellard5a9fdfe2003-06-15 20:02:25 +0000255
Paul Brookb480d9b2010-03-12 23:23:29 +0000256#if defined(CONFIG_USER_ONLY)
bellard5a9fdfe2003-06-15 20:02:25 +0000257void page_dump(FILE *f);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800258
Mikhail Ilyin1a1c4db2014-09-08 17:28:56 +0400259typedef int (*walk_memory_regions_fn)(void *, target_ulong,
260 target_ulong, unsigned long);
Richard Henderson5cd2c5b2010-03-10 15:53:37 -0800261int walk_memory_regions(void *, walk_memory_regions_fn);
262
pbrook53a59602006-03-25 19:31:22 +0000263int page_get_flags(target_ulong address);
264void page_set_flags(target_ulong start, target_ulong end, int flags);
ths3d97b402007-11-02 19:02:07 +0000265int page_check_range(target_ulong start, target_ulong len, int flags);
Paul Brookb480d9b2010-03-12 23:23:29 +0000266#endif
bellard5a9fdfe2003-06-15 20:02:25 +0000267
Andreas Färber9349b4f2012-03-14 01:38:32 +0100268CPUArchState *cpu_copy(CPUArchState *env);
thsc5be9f02007-02-28 20:20:53 +0000269
Richard Henderson9c762192011-05-04 13:34:24 -0700270/* Flags for use in ENV->INTERRUPT_PENDING.
271
272 The numbers assigned here are non-sequential in order to preserve
273 binary compatibility with the vmstate dump. Bit 0 (0x0001) was
274 previously used for CPU_INTERRUPT_EXIT, and is cleared when loading
275 the vmstate dump. */
276
277/* External hardware interrupt pending. This is typically used for
278 interrupts from devices. */
279#define CPU_INTERRUPT_HARD 0x0002
280
281/* Exit the current TB. This is typically used when some system-level device
282 makes some change to the memory mapping. E.g. the a20 line change. */
283#define CPU_INTERRUPT_EXITTB 0x0004
284
285/* Halt the CPU. */
286#define CPU_INTERRUPT_HALT 0x0020
287
288/* Debug event pending. */
289#define CPU_INTERRUPT_DEBUG 0x0080
290
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100291/* Reset signal. */
292#define CPU_INTERRUPT_RESET 0x0400
293
Richard Henderson9c762192011-05-04 13:34:24 -0700294/* Several target-specific external hardware interrupts. Each target/cpu.h
295 should define proper names based on these defines. */
296#define CPU_INTERRUPT_TGT_EXT_0 0x0008
297#define CPU_INTERRUPT_TGT_EXT_1 0x0010
298#define CPU_INTERRUPT_TGT_EXT_2 0x0040
299#define CPU_INTERRUPT_TGT_EXT_3 0x0200
300#define CPU_INTERRUPT_TGT_EXT_4 0x1000
301
302/* Several target-specific internal interrupts. These differ from the
Dong Xu Wang07f35072011-11-22 18:06:26 +0800303 preceding target-specific interrupts in that they are intended to
Richard Henderson9c762192011-05-04 13:34:24 -0700304 originate from within the cpu itself, typically in response to some
305 instruction being executed. These, therefore, are not masked while
306 single-stepping within the debugger. */
307#define CPU_INTERRUPT_TGT_INT_0 0x0100
Paolo Bonzini4a92a552013-03-05 15:35:17 +0100308#define CPU_INTERRUPT_TGT_INT_1 0x0800
309#define CPU_INTERRUPT_TGT_INT_2 0x2000
Richard Henderson9c762192011-05-04 13:34:24 -0700310
Jan Kiszkad362e752012-02-17 18:31:17 +0100311/* First unused bit: 0x4000. */
Richard Henderson9c762192011-05-04 13:34:24 -0700312
Richard Henderson3125f762011-05-04 13:34:25 -0700313/* The set of all bits that should be masked when single-stepping. */
314#define CPU_INTERRUPT_SSTEP_MASK \
315 (CPU_INTERRUPT_HARD \
316 | CPU_INTERRUPT_TGT_EXT_0 \
317 | CPU_INTERRUPT_TGT_EXT_1 \
318 | CPU_INTERRUPT_TGT_EXT_2 \
319 | CPU_INTERRUPT_TGT_EXT_3 \
320 | CPU_INTERRUPT_TGT_EXT_4)
bellard98699962005-11-26 10:29:22 +0000321
Paul Brookb3755a92010-03-12 16:54:58 +0000322#if !defined(CONFIG_USER_ONLY)
323
pbrook0f459d12008-06-09 00:20:13 +0000324/* Flags stored in the low bits of the TLB virtual address. These are
Sergey Sorokin1f00b272016-06-23 21:16:46 +0300325 * defined so that fast path ram access is all zeros.
326 * The flags all must be between TARGET_PAGE_BITS and
327 * maximum address alignment bit.
328 */
pbrook0f459d12008-06-09 00:20:13 +0000329/* Zero if TLB entry is valid. */
Sergey Sorokin1f00b272016-06-23 21:16:46 +0300330#define TLB_INVALID_MASK (1 << (TARGET_PAGE_BITS - 1))
pbrook0f459d12008-06-09 00:20:13 +0000331/* Set if TLB entry references a clean RAM page. The iotlb entry will
332 contain the page physical address. */
Sergey Sorokin1f00b272016-06-23 21:16:46 +0300333#define TLB_NOTDIRTY (1 << (TARGET_PAGE_BITS - 2))
pbrook0f459d12008-06-09 00:20:13 +0000334/* Set if TLB entry is an IO callback. */
Sergey Sorokin1f00b272016-06-23 21:16:46 +0300335#define TLB_MMIO (1 << (TARGET_PAGE_BITS - 3))
336
337/* Use this mask to check interception with an alignment mask
338 * in a TCG backend.
339 */
340#define TLB_FLAGS_MASK (TLB_INVALID_MASK | TLB_NOTDIRTY | TLB_MMIO)
pbrook0f459d12008-06-09 00:20:13 +0000341
Stefan Weil055403b2010-10-22 23:03:32 +0200342void dump_exec_info(FILE *f, fprintf_function cpu_fprintf);
Max Filippov246ae242014-11-02 11:04:18 +0300343void dump_opcount_info(FILE *f, fprintf_function cpu_fprintf);
Paul Brookb3755a92010-03-12 16:54:58 +0000344#endif /* !CONFIG_USER_ONLY */
345
Andreas Färberf17ec442013-06-29 19:40:58 +0200346int cpu_memory_rw_debug(CPUState *cpu, target_ulong addr,
Paul Brookb3755a92010-03-12 16:54:58 +0000347 uint8_t *buf, int len, int is_write);
348
Peter Crosthwaite8642c1b2015-07-18 02:40:28 -0700349int cpu_exec(CPUState *cpu);
350
bellard5a9fdfe2003-06-15 20:02:25 +0000351#endif /* CPU_ALL_H */