blob: 8bc2eb663ef2522ecfbee07dd81c28a894e7ad02 [file] [log] [blame]
bellardd4e81642003-05-25 16:46:15 +00001/*
2 * internal execution defines for qemu
ths5fafdf22007-09-16 21:08:06 +00003 *
bellardd4e81642003-05-25 16:46:15 +00004 * Copyright (c) 2003 Fabrice Bellard
5 *
6 * This library is free software; you can redistribute it and/or
7 * modify it under the terms of the GNU Lesser General Public
8 * License as published by the Free Software Foundation; either
9 * version 2 of the License, or (at your option) any later version.
10 *
11 * This library is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
14 * Lesser General Public License for more details.
15 *
16 * You should have received a copy of the GNU Lesser General Public
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
bellardd4e81642003-05-25 16:46:15 +000018 */
19
aliguori875cdcf2008-10-23 13:52:00 +000020#ifndef _EXEC_ALL_H_
21#define _EXEC_ALL_H_
blueswir17d99a002009-01-14 19:00:36 +000022
23#include "qemu-common.h"
24
bellardb346ff42003-06-15 20:05:50 +000025/* allow to see translation results - the slowdown should be negligible, so we leave it */
aurel32de9a95f2008-11-11 13:41:01 +000026#define DEBUG_DISAS
bellardb346ff42003-06-15 20:05:50 +000027
Paul Brook41c1b1c2010-03-12 16:54:58 +000028/* Page tracking code uses ram addresses in system mode, and virtual
29 addresses in userspace mode. Define tb_page_addr_t to be an appropriate
30 type. */
31#if defined(CONFIG_USER_ONLY)
Paul Brookb480d9b2010-03-12 23:23:29 +000032typedef abi_ulong tb_page_addr_t;
Paul Brook41c1b1c2010-03-12 16:54:58 +000033#else
34typedef ram_addr_t tb_page_addr_t;
35#endif
36
bellardb346ff42003-06-15 20:05:50 +000037/* is_jmp field values */
38#define DISAS_NEXT 0 /* next instruction can be analyzed */
39#define DISAS_JUMP 1 /* only pc was modified dynamically */
40#define DISAS_UPDATE 2 /* cpu state was modified dynamically */
41#define DISAS_TB_JUMP 3 /* only pc was modified statically */
42
Blue Swirlf081c762011-05-21 07:10:23 +000043struct TranslationBlock;
pbrook2e70f6e2008-06-29 01:03:05 +000044typedef struct TranslationBlock TranslationBlock;
bellardb346ff42003-06-15 20:05:50 +000045
46/* XXX: make safe guess about sizes */
Peter Maydell14dcdac2014-03-17 16:31:51 +000047#define MAX_OP_PER_INSTR 266
Stuart Brady4d0e4ac2010-04-27 22:23:35 +010048
49#if HOST_LONG_BITS == 32
50#define MAX_OPC_PARAM_PER_ARG 2
51#else
52#define MAX_OPC_PARAM_PER_ARG 1
53#endif
Stefan Weil3cebc3f2012-09-12 19:18:55 +020054#define MAX_OPC_PARAM_IARGS 5
Stuart Brady4d0e4ac2010-04-27 22:23:35 +010055#define MAX_OPC_PARAM_OARGS 1
56#define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS)
57
58/* A Call op needs up to 4 + 2N parameters on 32-bit archs,
59 * and up to 4 + N parameters on 64-bit archs
60 * (N = number of input arguments + output arguments). */
61#define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS))
Aurelien Jarno6db73502009-09-22 23:31:04 +020062#define OPC_BUF_SIZE 640
bellardb346ff42003-06-15 20:05:50 +000063#define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR)
64
pbrooka208e542008-03-31 17:07:36 +000065/* Maximum size a TCG op can expand to. This is complicated because a
Aurelien Jarno0cbfcd22009-10-22 02:36:27 +020066 single op may require several host instructions and register reloads.
67 For now take a wild guess at 192 bytes, which should allow at least
pbrooka208e542008-03-31 17:07:36 +000068 a couple of fixup instructions per argument. */
Aurelien Jarno0cbfcd22009-10-22 02:36:27 +020069#define TCG_MAX_OP_SIZE 192
pbrooka208e542008-03-31 17:07:36 +000070
pbrook0115be32008-02-03 17:35:41 +000071#define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM)
bellardb346ff42003-06-15 20:05:50 +000072
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010073#include "qemu/log.h"
bellardb346ff42003-06-15 20:05:50 +000074
Andreas Färber9349b4f2012-03-14 01:38:32 +010075void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
76void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
77void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
Stefan Weile87b7cb2011-04-18 06:39:52 +000078 int pc_pos);
aurel32d2856f12008-04-28 00:32:32 +000079
bellard57fec1f2008-02-01 10:50:11 +000080void cpu_gen_init(void);
Andreas Färber9349b4f2012-03-14 01:38:32 +010081int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
blueswir1d07bde82007-12-11 19:35:45 +000082 int *gen_code_size_ptr);
Andreas Färber3f38f302013-09-01 16:51:34 +020083bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
Alexey Kardashevskiy47c16ed2014-01-17 11:12:07 -070084void page_size_init(void);
Blue Swirla8a826a2012-12-04 20:16:07 +000085
Andreas Färber0ea8cb82013-09-03 02:12:23 +020086void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
Andreas Färber90b40a62013-09-01 17:21:47 +020087void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
Andreas Färber648f0342013-09-01 17:43:17 +020088TranslationBlock *tb_gen_code(CPUState *cpu,
pbrook2e70f6e2008-06-29 01:03:05 +000089 target_ulong pc, target_ulong cs_base, int flags,
90 int cflags);
Andreas Färber9349b4f2012-03-14 01:38:32 +010091void cpu_exec_init(CPUArchState *env);
Andreas Färber5638d182013-08-27 17:52:12 +020092void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
Stefan Weil6375e092012-04-06 22:26:15 +020093int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
Paul Brook41c1b1c2010-03-12 16:54:58 +000094void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
bellard2e126692004-04-25 21:28:44 +000095 int is_cpu_write_access);
Alexander Graf77a8f1a2012-05-10 22:40:10 +000096void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
97 int is_cpu_write_access);
Blue Swirl0cac1b62012-04-09 16:50:52 +000098#if !defined(CONFIG_USER_ONLY)
Edgar E. Iglesias09daed82013-12-17 13:06:51 +100099void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
Blue Swirl0cac1b62012-04-09 16:50:52 +0000100/* cputlb.c */
Andreas Färber31b030d2013-09-04 01:29:02 +0200101void tlb_flush_page(CPUState *cpu, target_ulong addr);
Andreas Färber00c8cb02013-09-04 02:19:44 +0200102void tlb_flush(CPUState *cpu, int flush_global);
Andreas Färber0c591eb2013-09-03 13:59:37 +0200103void tlb_set_page(CPUState *cpu, target_ulong vaddr,
Avi Kivitya8170e52012-10-23 12:30:10 +0200104 hwaddr paddr, int prot,
Paul Brookd4c430a2010-03-17 02:14:28 +0000105 int mmu_idx, target_ulong size);
Edgar E. Iglesias29d8ec72013-11-07 19:43:10 +0100106void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
Blue Swirl0cac1b62012-04-09 16:50:52 +0000107#else
/* User-mode emulation has no softmmu TLB (see the #if above), so
   flushing a page mapping is a no-op. */
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}
111
/* User-mode emulation has no softmmu TLB, so a full flush is a no-op;
   flush_global is ignored. */
static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
Paul Brookc527ee82010-03-01 03:31:14 +0000115#endif
bellardd4e81642003-05-25 16:46:15 +0000116
bellardd4e81642003-05-25 16:46:15 +0000117#define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */
118
bellard4390df52004-01-04 18:03:10 +0000119#define CODE_GEN_PHYS_HASH_BITS 15
120#define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS)
121
bellard4390df52004-01-04 18:03:10 +0000122/* estimated block size for TB allocation */
123/* XXX: use a per code average code fragment size and modulate it
124 according to the host CPU */
125#if defined(CONFIG_SOFTMMU)
126#define CODE_GEN_AVG_BLOCK_SIZE 128
127#else
128#define CODE_GEN_AVG_BLOCK_SIZE 64
129#endif
130
Richard Henderson5bbd2ca2012-09-21 10:48:51 -0700131#if defined(__arm__) || defined(_ARCH_PPC) \
132 || defined(__x86_64__) || defined(__i386__) \
Claudio Fontana4a136e02013-06-12 16:20:22 +0100133 || defined(__sparc__) || defined(__aarch64__) \
Richard Hendersona10c64e2014-05-14 17:14:51 -0400134 || defined(__s390x__) \
Richard Henderson5bbd2ca2012-09-21 10:48:51 -0700135 || defined(CONFIG_TCG_INTERPRETER)
Stefan Weil73163292011-10-05 20:03:02 +0200136#define USE_DIRECT_JUMP
bellardd4e81642003-05-25 16:46:15 +0000137#endif
138
/* Descriptor for one block of translated guest code.  Field order is
   relied upon by the generated-code patching paths; do not reorder. */
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    void *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;    /* NOTE(review): appears to be the guest insn count
                           for this TB (cf. CF_COUNT_MASK) — confirm in
                           translate-all.c */
};
bellardd4e81642003-05-25 16:46:15 +0000173
Evgeny Voevodin5e5f07e2013-02-01 01:47:23 +0700174#include "exec/spinlock.h"
175
typedef struct TBContext TBContext;

/* Global state for all translated blocks: the TB array, the physical-page
   hash table, and associated statistics. */
struct TBContext {

    TranslationBlock *tbs;      /* array of all allocated TBs */
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE]; /* chains keyed by
                                   tb_phys_hash_func() */
    int nb_tbs;                 /* number of TBs currently in use in tbs[] */
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;             /* times the whole TB cache was flushed */
    int tb_phys_invalidate_count;   /* TBs invalidated by physical address */

    int tb_invalidated_flag;    /* set when a TB was invalidated; readers
                                   presumably re-check their cached TB —
                                   confirm against cpu-exec.c */
};
192
pbrookb362e5e2006-11-12 20:40:55 +0000193static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc)
194{
195 target_ulong tmp;
196 tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
edgar_iglb5e19d42008-05-06 08:38:22 +0000197 return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK;
pbrookb362e5e2006-11-12 20:40:55 +0000198}
199
bellard8a40a182005-11-20 10:35:40 +0000200static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc)
bellardd4e81642003-05-25 16:46:15 +0000201{
pbrookb362e5e2006-11-12 20:40:55 +0000202 target_ulong tmp;
203 tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS));
edgar_iglb5e19d42008-05-06 08:38:22 +0000204 return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK)
205 | (tmp & TB_JMP_ADDR_MASK));
bellardd4e81642003-05-25 16:46:15 +0000206}
207
Paul Brook41c1b1c2010-03-12 16:54:58 +0000208static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc)
bellard4390df52004-01-04 18:03:10 +0000209{
Aurelien Jarnof96a3832010-12-28 17:46:59 +0100210 return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1);
bellard4390df52004-01-04 18:03:10 +0000211}
212
pbrook2e70f6e2008-06-29 01:03:05 +0000213void tb_free(TranslationBlock *tb);
Andreas Färber9349b4f2012-03-14 01:38:32 +0100214void tb_flush(CPUArchState *env);
Paul Brook41c1b1c2010-03-12 16:54:58 +0000215void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr);
bellardd4e81642003-05-25 16:46:15 +0000216
bellard4390df52004-01-04 18:03:10 +0000217#if defined(USE_DIRECT_JUMP)
218
Stefan Weil73163292011-10-05 20:03:02 +0200219#if defined(CONFIG_TCG_INTERPRETER)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* TCI variant: rewrite the 32-bit relative displacement stored at
       jmp_addr.  The interpreter fetches it as data, so there is no
       icache to flush. */
    uint32_t disp = addr - (jmp_addr + 4);
    *(uint32_t *)jmp_addr = disp;
}
226#elif defined(_ARCH_PPC)
Blue Swirl64b85a82011-01-23 16:21:20 +0000227void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr);
malc810260a2008-07-23 19:17:46 +0000228#define tb_set_jmp_target1 ppc_tb_set_jmp_target
bellard57fec1f2008-02-01 10:50:11 +0000229#elif defined(__i386__) || defined(__x86_64__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* x86 variant: patch the 32-bit little-endian relative displacement
       of the jump (relative to the end of the 4-byte operand).  x86 keeps
       its icache coherent, so no explicit flush is needed. */
    uintptr_t disp = addr - (jmp_addr + 4);
    stl_le_p((void *)jmp_addr, disp);
}
Richard Hendersona10c64e2014-05-14 17:14:51 -0400236#elif defined(__s390x__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination: the displacement is measured from the
       start of the branch insn (the 32-bit operand at jmp_addr sits 2
       bytes into it, hence the -2) and is counted in 2-byte halfwords
       (hence the /2), stored big-endian. */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
Claudio Fontana4a136e02013-06-12 16:20:22 +0100244#elif defined(__aarch64__)
245void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr);
246#define tb_set_jmp_target1 aarch64_tb_set_jmp_target
balrog811d4cf2008-05-19 23:59:38 +0000247#elif defined(__arm__)
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    /* fixed registers a1-a3 for the cacheflush syscall arguments below */
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    /* Rewrite only the low 24 bits (the branch immediate): the offset is
       in words (>> 2) and relative to PC, which the pipeline makes read
       as insn address + 8. */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    /* GCC >= 4.1 provides a portable icache-flush builtin */
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache via syscall 0x9f0002 — presumably the ARM Linux
       cacheflush syscall; verify against the kernel ABI */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
Richard Henderson5bbd2ca2012-09-21 10:48:51 -0700271#elif defined(__sparc__)
272void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr);
Stefan Weil73163292011-10-05 20:03:02 +0200273#else
274#error tb_set_jmp_target1 is missing
bellard4390df52004-01-04 18:03:10 +0000275#endif
bellardd4e81642003-05-25 16:46:15 +0000276
ths5fafdf22007-09-16 21:08:06 +0000277static inline void tb_set_jmp_target(TranslationBlock *tb,
Stefan Weil6375e092012-04-06 22:26:15 +0200278 int n, uintptr_t addr)
bellard4cbb86e2003-09-17 22:53:29 +0000279{
Stefan Weil6375e092012-04-06 22:26:15 +0200280 uint16_t offset = tb->tb_jmp_offset[n];
281 tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr);
bellard4cbb86e2003-09-17 22:53:29 +0000282}
283
bellardd4e81642003-05-25 16:46:15 +0000284#else
285
286/* set the jump target */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    /* Indirect variant: presumably the generated code loads tb_next[n] at
       run time, so storing the new address is enough to retarget jump n —
       confirm against the TCG backend. */
    tb->tb_next[n] = addr;
}
292
293#endif
294
/* Chain jump n of 'tb' to 'tb_next': patch the generated code and link tb
   into tb_next's list of incoming jumps. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list; the jump index n (0 or 1) is packed
           into the two low bits of the stored pointer (see the jmp_next
           comment in struct TranslationBlock) */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
308
Richard Henderson0f842f82013-08-27 10:22:54 -0700309/* GETRA is the true target of the return instruction that we'll execute,
310 defined here for simplicity of defining the follow-up macros. */
Stefan Weil73163292011-10-05 20:03:02 +0200311#if defined(CONFIG_TCG_INTERPRETER)
Stefan Weilc3ca0462012-04-17 19:22:39 +0200312extern uintptr_t tci_tb_ptr;
Richard Henderson0f842f82013-08-27 10:22:54 -0700313# define GETRA() tci_tb_ptr
Blue Swirl39171492011-09-21 18:13:16 +0000314#else
Richard Henderson0f842f82013-08-27 10:22:54 -0700315# define GETRA() \
316 ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0)))
Blue Swirl39171492011-09-21 18:13:16 +0000317#endif
318
Richard Henderson0f842f82013-08-27 10:22:54 -0700319/* The true return address will often point to a host insn that is part of
320 the next translated guest insn. Adjust the address backward to point to
321 the middle of the call insn. Subtracting one would do the job except for
322 several compressed mode architectures (arm, mips) which set the low bit
323 to indicate the compressed mode; subtracting two works around that. It
324 is also the case that there are no host isas that contain a call insn
325 smaller than 4 bytes, so we don't worry about special-casing this. */
326#if defined(CONFIG_TCG_INTERPRETER)
327# define GETPC_ADJ 0
328#else
329# define GETPC_ADJ 2
330#endif
Yeongkyoon Leefdbb84d2012-10-31 16:04:24 +0900331
Richard Henderson0f842f82013-08-27 10:22:54 -0700332#define GETPC() (GETRA() - GETPC_ADJ)
333
bellarde95c8d52004-09-30 22:22:08 +0000334#if !defined(CONFIG_USER_ONLY)
bellard6e59c1d2003-10-27 21:24:54 +0000335
Stefan Weil575ddeb2013-09-29 20:56:45 +0200336void phys_mem_set_alloc(void *(*alloc)(size_t));
Markus Armbruster91138032013-07-31 15:11:08 +0200337
Edgar E. Iglesias77717092013-11-07 19:55:56 +0100338struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
Paolo Bonzini791af8c2013-05-24 16:10:39 +0200339bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
340 uint64_t *pvalue, unsigned size);
341bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
Avi Kivity37ec01d2012-03-08 18:08:35 +0200342 uint64_t value, unsigned size);
Paul Brookb3755a92010-03-12 16:54:58 +0000343
Andreas Färberd5a11fe2013-08-27 00:28:06 +0200344void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
Blue Swirl20503962012-04-09 14:20:20 +0000345 uintptr_t retaddr);
bellard6e59c1d2003-10-27 21:24:54 +0000346
Richard Hendersone58eb532013-08-27 13:13:44 -0700347uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
348uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
349uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
350uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
blueswir179383c92008-08-30 09:51:20 +0000351
j_mayer6ebbf392007-10-14 07:07:08 +0000352#define ACCESS_TYPE (NB_MMU_MODES + 1)
bellard6e59c1d2003-10-27 21:24:54 +0000353#define MEMSUFFIX _code
bellard6e59c1d2003-10-27 21:24:54 +0000354
355#define DATA_SIZE 1
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100356#include "exec/softmmu_header.h"
bellard6e59c1d2003-10-27 21:24:54 +0000357
358#define DATA_SIZE 2
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100359#include "exec/softmmu_header.h"
bellard6e59c1d2003-10-27 21:24:54 +0000360
361#define DATA_SIZE 4
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100362#include "exec/softmmu_header.h"
bellard6e59c1d2003-10-27 21:24:54 +0000363
bellardc27004e2005-01-03 23:35:10 +0000364#define DATA_SIZE 8
Paolo Bonzini022c62c2012-12-17 18:19:49 +0100365#include "exec/softmmu_header.h"
bellardc27004e2005-01-03 23:35:10 +0000366
bellard6e59c1d2003-10-27 21:24:54 +0000367#undef ACCESS_TYPE
368#undef MEMSUFFIX
bellard6e59c1d2003-10-27 21:24:54 +0000369
370#endif
bellard4390df52004-01-04 18:03:10 +0000371
372#if defined(CONFIG_USER_ONLY)
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    /* In user mode tb_page_addr_t is the guest virtual address itself
       (see the tb_page_addr_t definition above), so no translation is
       needed; env1 is unused. */
    return addr;
}
377#else
Blue Swirl0cac1b62012-04-09 16:50:52 +0000378/* cputlb.c */
Andreas Färber9349b4f2012-03-14 01:38:32 +0100379tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr);
bellard4390df52004-01-04 18:03:10 +0000380#endif
bellard9df217a2005-02-10 22:05:51 +0000381
Andreas Färber9349b4f2012-03-14 01:38:32 +0100382typedef void (CPUDebugExcpHandler)(CPUArchState *env);
aliguoridde23672008-11-18 20:50:36 +0000383
Igor Mammedov84e3b602012-06-21 18:29:38 +0200384void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);
aurel321b530a62009-04-05 20:08:59 +0000385
386/* vl.c */
387extern int singlestep;
388
Marcelo Tosatti1a28cac2010-05-04 09:45:20 -0300389/* cpu-exec.c */
390extern volatile sig_atomic_t exit_request;
391
Andreas Färber99df7dc2013-08-26 05:15:23 +0200392/**
393 * cpu_can_do_io:
394 * @cpu: The CPU for which to check IO.
395 *
396 * Deterministic execution requires that IO only be performed on the last
397 * instruction of a TB so that interrupts take effect immediately.
398 *
399 * Returns: %true if memory-mapped IO is safe, %false otherwise.
400 */
401static inline bool cpu_can_do_io(CPUState *cpu)
Paolo Bonzini946fb272011-09-12 13:57:37 +0200402{
403 if (!use_icount) {
Andreas Färber99df7dc2013-08-26 05:15:23 +0200404 return true;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200405 }
406 /* If not executing code then assume we are ok. */
Andreas Färberd77953b2013-01-16 19:29:31 +0100407 if (cpu->current_tb == NULL) {
Andreas Färber99df7dc2013-08-26 05:15:23 +0200408 return true;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200409 }
Andreas Färber99df7dc2013-08-26 05:15:23 +0200410 return cpu->can_do_io != 0;
Paolo Bonzini946fb272011-09-12 13:57:37 +0200411}
412
aliguori875cdcf2008-10-23 13:52:00 +0000413#endif