bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 1 | /* |
| 2 | * internal execution defines for qemu |
ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 3 | * |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 4 | * Copyright (c) 2003 Fabrice Bellard |
| 5 | * |
| 6 | * This library is free software; you can redistribute it and/or |
| 7 | * modify it under the terms of the GNU Lesser General Public |
| 8 | * License as published by the Free Software Foundation; either |
| 9 | * version 2 of the License, or (at your option) any later version. |
| 10 | * |
| 11 | * This library is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU |
| 14 | * Lesser General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU Lesser General Public |
Blue Swirl | 8167ee8 | 2009-07-16 20:47:01 +0000 | [diff] [blame] | 17 | * License along with this library; if not, see <http://www.gnu.org/licenses/>. |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 18 | */ |
| 19 | |
aliguori | 875cdcf | 2008-10-23 13:52:00 +0000 | [diff] [blame] | 20 | #ifndef _EXEC_ALL_H_ |
| 21 | #define _EXEC_ALL_H_ |
blueswir1 | 7d99a00 | 2009-01-14 19:00:36 +0000 | [diff] [blame] | 22 | |
| 23 | #include "qemu-common.h" |
| 24 | |
bellard | b346ff4 | 2003-06-15 20:05:50 +0000 | [diff] [blame] | 25 | /* allow to see translation results - the slowdown should be negligible, so we leave it */ |
aurel32 | de9a95f | 2008-11-11 13:41:01 +0000 | [diff] [blame] | 26 | #define DEBUG_DISAS |
bellard | b346ff4 | 2003-06-15 20:05:50 +0000 | [diff] [blame] | 27 | |
Paul Brook | 41c1b1c | 2010-03-12 16:54:58 +0000 | [diff] [blame] | 28 | /* Page tracking code uses ram addresses in system mode, and virtual |
| 29 | addresses in userspace mode. Define tb_page_addr_t to be an appropriate |
| 30 | type. */ |
| 31 | #if defined(CONFIG_USER_ONLY) |
Paul Brook | b480d9b | 2010-03-12 23:23:29 +0000 | [diff] [blame] | 32 | typedef abi_ulong tb_page_addr_t; |
Paul Brook | 41c1b1c | 2010-03-12 16:54:58 +0000 | [diff] [blame] | 33 | #else |
| 34 | typedef ram_addr_t tb_page_addr_t; |
| 35 | #endif |
| 36 | |
bellard | b346ff4 | 2003-06-15 20:05:50 +0000 | [diff] [blame] | 37 | /* is_jmp field values */ |
| 38 | #define DISAS_NEXT 0 /* next instruction can be analyzed */ |
| 39 | #define DISAS_JUMP 1 /* only pc was modified dynamically */ |
| 40 | #define DISAS_UPDATE 2 /* cpu state was modified dynamically */ |
| 41 | #define DISAS_TB_JUMP 3 /* only pc was modified statically */ |
| 42 | |
Blue Swirl | f081c76 | 2011-05-21 07:10:23 +0000 | [diff] [blame] | 43 | struct TranslationBlock; |
pbrook | 2e70f6e | 2008-06-29 01:03:05 +0000 | [diff] [blame] | 44 | typedef struct TranslationBlock TranslationBlock; |
bellard | b346ff4 | 2003-06-15 20:05:50 +0000 | [diff] [blame] | 45 | |
| 46 | /* XXX: make safe guess about sizes */ |
Peter Maydell | 14dcdac | 2014-03-17 16:31:51 +0000 | [diff] [blame] | 47 | #define MAX_OP_PER_INSTR 266 |
Stuart Brady | 4d0e4ac | 2010-04-27 22:23:35 +0100 | [diff] [blame] | 48 | |
| 49 | #if HOST_LONG_BITS == 32 |
| 50 | #define MAX_OPC_PARAM_PER_ARG 2 |
| 51 | #else |
| 52 | #define MAX_OPC_PARAM_PER_ARG 1 |
| 53 | #endif |
Stefan Weil | 3cebc3f | 2012-09-12 19:18:55 +0200 | [diff] [blame] | 54 | #define MAX_OPC_PARAM_IARGS 5 |
Stuart Brady | 4d0e4ac | 2010-04-27 22:23:35 +0100 | [diff] [blame] | 55 | #define MAX_OPC_PARAM_OARGS 1 |
| 56 | #define MAX_OPC_PARAM_ARGS (MAX_OPC_PARAM_IARGS + MAX_OPC_PARAM_OARGS) |
| 57 | |
| 58 | /* A Call op needs up to 4 + 2N parameters on 32-bit archs, |
| 59 | * and up to 4 + N parameters on 64-bit archs |
| 60 | * (N = number of input arguments + output arguments). */ |
| 61 | #define MAX_OPC_PARAM (4 + (MAX_OPC_PARAM_PER_ARG * MAX_OPC_PARAM_ARGS)) |
Aurelien Jarno | 6db7350 | 2009-09-22 23:31:04 +0200 | [diff] [blame] | 62 | #define OPC_BUF_SIZE 640 |
bellard | b346ff4 | 2003-06-15 20:05:50 +0000 | [diff] [blame] | 63 | #define OPC_MAX_SIZE (OPC_BUF_SIZE - MAX_OP_PER_INSTR) |
| 64 | |
pbrook | a208e54 | 2008-03-31 17:07:36 +0000 | [diff] [blame] | 65 | /* Maximum size a TCG op can expand to. This is complicated because a |
Aurelien Jarno | 0cbfcd2 | 2009-10-22 02:36:27 +0200 | [diff] [blame] | 66 | single op may require several host instructions and register reloads. |
| 67 | For now take a wild guess at 192 bytes, which should allow at least |
pbrook | a208e54 | 2008-03-31 17:07:36 +0000 | [diff] [blame] | 68 | a couple of fixup instructions per argument. */ |
Aurelien Jarno | 0cbfcd2 | 2009-10-22 02:36:27 +0200 | [diff] [blame] | 69 | #define TCG_MAX_OP_SIZE 192 |
pbrook | a208e54 | 2008-03-31 17:07:36 +0000 | [diff] [blame] | 70 | |
pbrook | 0115be3 | 2008-02-03 17:35:41 +0000 | [diff] [blame] | 71 | #define OPPARAM_BUF_SIZE (OPC_BUF_SIZE * MAX_OPC_PARAM) |
bellard | b346ff4 | 2003-06-15 20:05:50 +0000 | [diff] [blame] | 72 | |
Paolo Bonzini | 1de7afc | 2012-12-17 18:20:00 +0100 | [diff] [blame] | 73 | #include "qemu/log.h" |
bellard | b346ff4 | 2003-06-15 20:05:50 +0000 | [diff] [blame] | 74 | |
/* Translation front end: guest insns -> TCG ops for one TB.  The _pc
   variant additionally records op/PC correspondence so CPU state can be
   restored mid-block (definitions live in each target's translate.c). */
void gen_intermediate_code(CPUArchState *env, struct TranslationBlock *tb);
void gen_intermediate_code_pc(CPUArchState *env, struct TranslationBlock *tb);
/* Roll the CPU state back to the guest insn at op position pc_pos of tb. */
void restore_state_to_opc(CPUArchState *env, struct TranslationBlock *tb,
                          int pc_pos);

void cpu_gen_init(void);
/* Generate host code for tb; the emitted size is returned through
   *gen_code_size_ptr.  NOTE(review): meaning of the int result is not
   visible in this header -- confirm at the definition. */
int cpu_gen_code(CPUArchState *env, struct TranslationBlock *tb,
                 int *gen_code_size_ptr);
/* Restore guest CPU state for a fault at host address searched_pc. */
bool cpu_restore_state(CPUState *cpu, uintptr_t searched_pc);
void page_size_init(void);

/* Non-returning exits from generated code back to the execution loop. */
void QEMU_NORETURN cpu_resume_from_signal(CPUState *cpu, void *puc);
void QEMU_NORETURN cpu_io_recompile(CPUState *cpu, uintptr_t retaddr);
/* Translate (or re-translate) the block at guest pc/cs_base/flags. */
TranslationBlock *tb_gen_code(CPUState *cpu,
                              target_ulong pc, target_ulong cs_base, int flags,
                              int cflags);
void cpu_exec_init(CPUArchState *env);
void QEMU_NORETURN cpu_loop_exit(CPUState *cpu);
int page_unprotect(target_ulong address, uintptr_t pc, void *puc);
/* Drop every TB overlapping [start, end) of guest code. */
void tb_invalidate_phys_page_range(tb_page_addr_t start, tb_page_addr_t end,
                                   int is_cpu_write_access);
void tb_invalidate_phys_range(tb_page_addr_t start, tb_page_addr_t end,
                              int is_cpu_write_access);
#if !defined(CONFIG_USER_ONLY)
void tcg_cpu_address_space_init(CPUState *cpu, AddressSpace *as);
/* cputlb.c */
void tlb_flush_page(CPUState *cpu, target_ulong addr);
void tlb_flush(CPUState *cpu, int flush_global);
void tlb_set_page(CPUState *cpu, target_ulong vaddr,
                  hwaddr paddr, int prot,
                  int mmu_idx, target_ulong size);
void tb_invalidate_phys_addr(AddressSpace *as, hwaddr addr);
#else
/* User-mode emulation has no softmmu TLB, so the flush hooks are no-ops. */
static inline void tlb_flush_page(CPUState *cpu, target_ulong addr)
{
}

static inline void tlb_flush(CPUState *cpu, int flush_global)
{
}
#endif
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 116 | |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 117 | #define CODE_GEN_ALIGN 16 /* must be >= of the size of a icache line */ |
| 118 | |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 119 | #define CODE_GEN_PHYS_HASH_BITS 15 |
| 120 | #define CODE_GEN_PHYS_HASH_SIZE (1 << CODE_GEN_PHYS_HASH_BITS) |
| 121 | |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 122 | /* estimated block size for TB allocation */ |
| 123 | /* XXX: use a per code average code fragment size and modulate it |
| 124 | according to the host CPU */ |
| 125 | #if defined(CONFIG_SOFTMMU) |
| 126 | #define CODE_GEN_AVG_BLOCK_SIZE 128 |
| 127 | #else |
| 128 | #define CODE_GEN_AVG_BLOCK_SIZE 64 |
| 129 | #endif |
| 130 | |
Richard Henderson | 5bbd2ca | 2012-09-21 10:48:51 -0700 | [diff] [blame] | 131 | #if defined(__arm__) || defined(_ARCH_PPC) \ |
| 132 | || defined(__x86_64__) || defined(__i386__) \ |
Claudio Fontana | 4a136e0 | 2013-06-12 16:20:22 +0100 | [diff] [blame] | 133 | || defined(__sparc__) || defined(__aarch64__) \ |
Richard Henderson | a10c64e | 2014-05-14 17:14:51 -0400 | [diff] [blame] | 134 | || defined(__s390x__) \ |
Richard Henderson | 5bbd2ca | 2012-09-21 10:48:51 -0700 | [diff] [blame] | 135 | || defined(CONFIG_TCG_INTERPRETER) |
Stefan Weil | 7316329 | 2011-10-05 20:03:02 +0200 | [diff] [blame] | 136 | #define USE_DIRECT_JUMP |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 137 | #endif |
| 138 | |
/* One translated block: a run of guest insns, the host code generated for
   it, and the linkage needed to chain blocks directly to each other. */
struct TranslationBlock {
    target_ulong pc;   /* simulated PC corresponding to this block (EIP + CS base) */
    target_ulong cs_base; /* CS base for this block */
    uint64_t flags; /* flags defining in which context the code was generated */
    uint16_t size;      /* size of target code for this block (1 <=
                           size <= TARGET_PAGE_SIZE) */
    uint16_t cflags;    /* compile flags */
#define CF_COUNT_MASK  0x7fff  /* low bits of cflags: insn count field */
#define CF_LAST_IO     0x8000 /* Last insn may be an IO access.  */

    void *tc_ptr;    /* pointer to the translated code */
    /* next matching tb for physical address. */
    struct TranslationBlock *phys_hash_next;
    /* first and second physical page containing code. The lower bit
       of the pointer tells the index in page_next[] */
    struct TranslationBlock *page_next[2];
    tb_page_addr_t page_addr[2];

    /* the following data are used to directly call another TB from
       the code of this one. */
    uint16_t tb_next_offset[2]; /* offset of original jump target */
#ifdef USE_DIRECT_JUMP
    uint16_t tb_jmp_offset[2]; /* offset of jump instruction */
#else
    uintptr_t tb_next[2]; /* address of jump generated code */
#endif
    /* list of TBs jumping to this one. This is a circular list using
       the two least significant bits of the pointers to tell what is
       the next pointer: 0 = jmp_next[0], 1 = jmp_next[1], 2 =
       jmp_first */
    struct TranslationBlock *jmp_next[2];
    struct TranslationBlock *jmp_first;
    uint32_t icount;    /* presumably the guest insn count of this block
                           (icount mode) -- NOTE(review): confirm at use sites */
};
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 173 | |
#include "exec/spinlock.h"

typedef struct TBContext TBContext;

/* Global state of the translation cache: all TBs, the physical-address
   hash table used to find them again, and bookkeeping/statistics. */
struct TBContext {

    TranslationBlock *tbs;       /* storage for all TBs (nb_tbs in use) */
    TranslationBlock *tb_phys_hash[CODE_GEN_PHYS_HASH_SIZE];
    int nb_tbs;
    /* any access to the tbs or the page table must use this lock */
    spinlock_t tb_lock;

    /* statistics */
    int tb_flush_count;
    int tb_phys_invalidate_count;

    /* set when a TB was invalidated; NOTE(review): consumer is not visible
       in this header -- confirm in cpu-exec.c */
    int tb_invalidated_flag;
};
| 192 | |
pbrook | b362e5e | 2006-11-12 20:40:55 +0000 | [diff] [blame] | 193 | static inline unsigned int tb_jmp_cache_hash_page(target_ulong pc) |
| 194 | { |
| 195 | target_ulong tmp; |
| 196 | tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); |
edgar_igl | b5e19d4 | 2008-05-06 08:38:22 +0000 | [diff] [blame] | 197 | return (tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK; |
pbrook | b362e5e | 2006-11-12 20:40:55 +0000 | [diff] [blame] | 198 | } |
| 199 | |
bellard | 8a40a18 | 2005-11-20 10:35:40 +0000 | [diff] [blame] | 200 | static inline unsigned int tb_jmp_cache_hash_func(target_ulong pc) |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 201 | { |
pbrook | b362e5e | 2006-11-12 20:40:55 +0000 | [diff] [blame] | 202 | target_ulong tmp; |
| 203 | tmp = pc ^ (pc >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)); |
edgar_igl | b5e19d4 | 2008-05-06 08:38:22 +0000 | [diff] [blame] | 204 | return (((tmp >> (TARGET_PAGE_BITS - TB_JMP_PAGE_BITS)) & TB_JMP_PAGE_MASK) |
| 205 | | (tmp & TB_JMP_ADDR_MASK)); |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 206 | } |
| 207 | |
Paul Brook | 41c1b1c | 2010-03-12 16:54:58 +0000 | [diff] [blame] | 208 | static inline unsigned int tb_phys_hash_func(tb_page_addr_t pc) |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 209 | { |
Aurelien Jarno | f96a383 | 2010-12-28 17:46:59 +0100 | [diff] [blame] | 210 | return (pc >> 2) & (CODE_GEN_PHYS_HASH_SIZE - 1); |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 211 | } |
| 212 | |
pbrook | 2e70f6e | 2008-06-29 01:03:05 +0000 | [diff] [blame] | 213 | void tb_free(TranslationBlock *tb); |
Andreas Färber | 9349b4f | 2012-03-14 01:38:32 +0100 | [diff] [blame] | 214 | void tb_flush(CPUArchState *env); |
Paul Brook | 41c1b1c | 2010-03-12 16:54:58 +0000 | [diff] [blame] | 215 | void tb_phys_invalidate(TranslationBlock *tb, tb_page_addr_t page_addr); |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 216 | |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 217 | #if defined(USE_DIRECT_JUMP) |
| 218 | |
Stefan Weil | 7316329 | 2011-10-05 20:03:02 +0200 | [diff] [blame] | 219 | #if defined(CONFIG_TCG_INTERPRETER) |
/* TCG interpreter: the "jump insn" is bytecode holding a 32-bit
   displacement relative to the end of the operand. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    *(uint32_t *)jmp_addr = addr - (jmp_addr + 4);
    /* no need to flush icache explicitly */
}
| 226 | #elif defined(_ARCH_PPC) |
Blue Swirl | 64b85a8 | 2011-01-23 16:21:20 +0000 | [diff] [blame] | 227 | void ppc_tb_set_jmp_target(unsigned long jmp_addr, unsigned long addr); |
malc | 810260a | 2008-07-23 19:17:46 +0000 | [diff] [blame] | 228 | #define tb_set_jmp_target1 ppc_tb_set_jmp_target |
bellard | 57fec1f | 2008-02-01 10:50:11 +0000 | [diff] [blame] | 229 | #elif defined(__i386__) || defined(__x86_64__) |
/* x86/x86-64: rewrite the rel32 operand of the jump; the displacement is
   relative to the end of the 4-byte operand (jmp_addr + 4). */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    stl_le_p((void*)jmp_addr, addr - (jmp_addr + 4));
    /* no need to flush icache explicitly */
}
Richard Henderson | a10c64e | 2014-05-14 17:14:51 -0400 | [diff] [blame] | 236 | #elif defined(__s390x__) |
/* s390x: presumably a BRCL-style relative branch -- the displacement is a
   signed halfword count measured from the start of the insn, which is two
   bytes before the operand (jmp_addr - 2).  NOTE(review): confirm against
   tcg/s390 backend. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
    /* patch the branch destination */
    intptr_t disp = addr - (jmp_addr - 2);
    stl_be_p((void*)jmp_addr, disp / 2);
    /* no need to flush icache explicitly */
}
Claudio Fontana | 4a136e0 | 2013-06-12 16:20:22 +0100 | [diff] [blame] | 244 | #elif defined(__aarch64__) |
| 245 | void aarch64_tb_set_jmp_target(uintptr_t jmp_addr, uintptr_t addr); |
| 246 | #define tb_set_jmp_target1 aarch64_tb_set_jmp_target |
balrog | 811d4cf | 2008-05-19 23:59:38 +0000 | [diff] [blame] | 247 | #elif defined(__arm__) |
/* ARM: rewrite the 24-bit signed word-offset field of the branch insn
   (offset is relative to PC+8), then flush the icache for that word. */
static inline void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr)
{
#if !QEMU_GNUC_PREREQ(4, 1)
    /* Old GCC lacks __builtin___clear_cache; pin the cacheflush syscall
       arguments to the registers the swi expects. */
    register unsigned long _beg __asm ("a1");
    register unsigned long _end __asm ("a2");
    register unsigned long _flg __asm ("a3");
#endif

    /* we could use a ldr pc, [pc, #-4] kind of branch and avoid the flush */
    *(uint32_t *)jmp_addr =
        (*(uint32_t *)jmp_addr & ~0xffffff)
        | (((addr - (jmp_addr + 8)) >> 2) & 0xffffff);

#if QEMU_GNUC_PREREQ(4, 1)
    __builtin___clear_cache((char *) jmp_addr, (char *) jmp_addr + 4);
#else
    /* flush icache */
    _beg = jmp_addr;
    _end = jmp_addr + 4;
    _flg = 0;
    __asm __volatile__ ("swi 0x9f0002" : : "r" (_beg), "r" (_end), "r" (_flg));
#endif
}
Richard Henderson | 5bbd2ca | 2012-09-21 10:48:51 -0700 | [diff] [blame] | 271 | #elif defined(__sparc__) |
| 272 | void tb_set_jmp_target1(uintptr_t jmp_addr, uintptr_t addr); |
Stefan Weil | 7316329 | 2011-10-05 20:03:02 +0200 | [diff] [blame] | 273 | #else |
| 274 | #error tb_set_jmp_target1 is missing |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 275 | #endif |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 276 | |
ths | 5fafdf2 | 2007-09-16 21:08:06 +0000 | [diff] [blame] | 277 | static inline void tb_set_jmp_target(TranslationBlock *tb, |
Stefan Weil | 6375e09 | 2012-04-06 22:26:15 +0200 | [diff] [blame] | 278 | int n, uintptr_t addr) |
bellard | 4cbb86e | 2003-09-17 22:53:29 +0000 | [diff] [blame] | 279 | { |
Stefan Weil | 6375e09 | 2012-04-06 22:26:15 +0200 | [diff] [blame] | 280 | uint16_t offset = tb->tb_jmp_offset[n]; |
| 281 | tb_set_jmp_target1((uintptr_t)(tb->tc_ptr + offset), addr); |
bellard | 4cbb86e | 2003-09-17 22:53:29 +0000 | [diff] [blame] | 282 | } |
| 283 | |
bellard | d4e8164 | 2003-05-25 16:46:15 +0000 | [diff] [blame] | 284 | #else |
| 285 | |
| 286 | /* set the jump target */ |
/* No direct patching on this host: record the target address and let the
   generated code branch through tb_next[n] indirectly. */
static inline void tb_set_jmp_target(TranslationBlock *tb,
                                     int n, uintptr_t addr)
{
    tb->tb_next[n] = addr;
}
| 292 | |
| 293 | #endif |
| 294 | |
/* Chain jump slot n of tb to tb_next so generated code flows directly
   between the two blocks without returning to the execution loop. */
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    /* NOTE: this test is only needed for thread safety */
    if (!tb->jmp_next[n]) {
        /* patch the native jump address */
        tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc_ptr);

        /* add in TB jmp circular list; the low bits of each pointer encode
           which slot of the referring TB is in use (see jmp_next[] comment
           in struct TranslationBlock) */
        tb->jmp_next[n] = tb_next->jmp_first;
        tb_next->jmp_first = (TranslationBlock *)((uintptr_t)(tb) | (n));
    }
}
| 308 | |
Richard Henderson | 0f842f8 | 2013-08-27 10:22:54 -0700 | [diff] [blame] | 309 | /* GETRA is the true target of the return instruction that we'll execute, |
| 310 | defined here for simplicity of defining the follow-up macros. */ |
Stefan Weil | 7316329 | 2011-10-05 20:03:02 +0200 | [diff] [blame] | 311 | #if defined(CONFIG_TCG_INTERPRETER) |
Stefan Weil | c3ca046 | 2012-04-17 19:22:39 +0200 | [diff] [blame] | 312 | extern uintptr_t tci_tb_ptr; |
Richard Henderson | 0f842f8 | 2013-08-27 10:22:54 -0700 | [diff] [blame] | 313 | # define GETRA() tci_tb_ptr |
Blue Swirl | 3917149 | 2011-09-21 18:13:16 +0000 | [diff] [blame] | 314 | #else |
Richard Henderson | 0f842f8 | 2013-08-27 10:22:54 -0700 | [diff] [blame] | 315 | # define GETRA() \ |
| 316 | ((uintptr_t)__builtin_extract_return_addr(__builtin_return_address(0))) |
Blue Swirl | 3917149 | 2011-09-21 18:13:16 +0000 | [diff] [blame] | 317 | #endif |
| 318 | |
Richard Henderson | 0f842f8 | 2013-08-27 10:22:54 -0700 | [diff] [blame] | 319 | /* The true return address will often point to a host insn that is part of |
| 320 | the next translated guest insn. Adjust the address backward to point to |
| 321 | the middle of the call insn. Subtracting one would do the job except for |
| 322 | several compressed mode architectures (arm, mips) which set the low bit |
| 323 | to indicate the compressed mode; subtracting two works around that. It |
| 324 | is also the case that there are no host isas that contain a call insn |
| 325 | smaller than 4 bytes, so we don't worry about special-casing this. */ |
| 326 | #if defined(CONFIG_TCG_INTERPRETER) |
| 327 | # define GETPC_ADJ 0 |
| 328 | #else |
| 329 | # define GETPC_ADJ 2 |
| 330 | #endif |
Yeongkyoon Lee | fdbb84d | 2012-10-31 16:04:24 +0900 | [diff] [blame] | 331 | |
Richard Henderson | 0f842f8 | 2013-08-27 10:22:54 -0700 | [diff] [blame] | 332 | #define GETPC() (GETRA() - GETPC_ADJ) |
| 333 | |
bellard | e95c8d5 | 2004-09-30 22:22:08 +0000 | [diff] [blame] | 334 | #if !defined(CONFIG_USER_ONLY) |
bellard | 6e59c1d | 2003-10-27 21:24:54 +0000 | [diff] [blame] | 335 | |
/* Override the allocator used for physical memory blocks. */
void phys_mem_set_alloc(void *(*alloc)(size_t));

struct MemoryRegion *iotlb_to_region(AddressSpace *as, hwaddr index);
/* MMIO dispatch.  NOTE(review): the meaning of the bool result (presumably
   an access-fault indication) is not visible here -- confirm at the
   definitions. */
bool io_mem_read(struct MemoryRegion *mr, hwaddr addr,
                 uint64_t *pvalue, unsigned size);
bool io_mem_write(struct MemoryRegion *mr, hwaddr addr,
                  uint64_t value, unsigned size);

/* Refill the softmmu TLB after a miss at addr; retaddr is the host return
   address, used to restore guest CPU state if the access faults. */
void tlb_fill(CPUState *cpu, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr);

/* Code-fetch ("cmmu") load helpers, one per access width. */
uint8_t helper_ldb_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint16_t helper_ldw_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint32_t helper_ldl_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
uint64_t helper_ldq_cmmu(CPUArchState *env, target_ulong addr, int mmu_idx);
blueswir1 | 79383c9 | 2008-08-30 09:51:20 +0000 | [diff] [blame] | 351 | |
/* Instantiate the softmmu accessor template once per access size with the
   "_code" suffix -- presumably producing the ld*_code fetch accessors;
   ACCESS_TYPE beyond NB_MMU_MODES marks these as code fetches. */
#define ACCESS_TYPE (NB_MMU_MODES + 1)
#define MEMSUFFIX _code

#define DATA_SIZE 1
#include "exec/softmmu_header.h"

#define DATA_SIZE 2
#include "exec/softmmu_header.h"

#define DATA_SIZE 4
#include "exec/softmmu_header.h"

#define DATA_SIZE 8
#include "exec/softmmu_header.h"

#undef ACCESS_TYPE
#undef MEMSUFFIX
bellard | 6e59c1d | 2003-10-27 21:24:54 +0000 | [diff] [blame] | 369 | |
| 370 | #endif |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 371 | |
| 372 | #if defined(CONFIG_USER_ONLY) |
/* User mode: guest virtual addresses serve directly as TB page addresses
   (tb_page_addr_t is abi_ulong in this configuration). */
static inline tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr)
{
    return addr;
}
| 377 | #else |
Blue Swirl | 0cac1b6 | 2012-04-09 16:50:52 +0000 | [diff] [blame] | 378 | /* cputlb.c */ |
Andreas Färber | 9349b4f | 2012-03-14 01:38:32 +0100 | [diff] [blame] | 379 | tb_page_addr_t get_page_addr_code(CPUArchState *env1, target_ulong addr); |
bellard | 4390df5 | 2004-01-04 18:03:10 +0000 | [diff] [blame] | 380 | #endif |
bellard | 9df217a | 2005-02-10 22:05:51 +0000 | [diff] [blame] | 381 | |
/* Hook type invoked on debug exceptions.  NOTE(review): trigger points are
   not visible in this header -- confirm at the call sites. */
typedef void (CPUDebugExcpHandler)(CPUArchState *env);

void cpu_set_debug_excp_handler(CPUDebugExcpHandler *handler);

/* vl.c */
extern int singlestep;

/* cpu-exec.c: set (e.g. from signal context, hence sig_atomic_t) to ask
   the execution loop to stop. */
extern volatile sig_atomic_t exit_request;
| 391 | |
Andreas Färber | 99df7dc | 2013-08-26 05:15:23 +0200 | [diff] [blame] | 392 | /** |
| 393 | * cpu_can_do_io: |
| 394 | * @cpu: The CPU for which to check IO. |
| 395 | * |
| 396 | * Deterministic execution requires that IO only be performed on the last |
| 397 | * instruction of a TB so that interrupts take effect immediately. |
| 398 | * |
| 399 | * Returns: %true if memory-mapped IO is safe, %false otherwise. |
| 400 | */ |
| 401 | static inline bool cpu_can_do_io(CPUState *cpu) |
Paolo Bonzini | 946fb27 | 2011-09-12 13:57:37 +0200 | [diff] [blame] | 402 | { |
| 403 | if (!use_icount) { |
Andreas Färber | 99df7dc | 2013-08-26 05:15:23 +0200 | [diff] [blame] | 404 | return true; |
Paolo Bonzini | 946fb27 | 2011-09-12 13:57:37 +0200 | [diff] [blame] | 405 | } |
| 406 | /* If not executing code then assume we are ok. */ |
Andreas Färber | d77953b | 2013-01-16 19:29:31 +0100 | [diff] [blame] | 407 | if (cpu->current_tb == NULL) { |
Andreas Färber | 99df7dc | 2013-08-26 05:15:23 +0200 | [diff] [blame] | 408 | return true; |
Paolo Bonzini | 946fb27 | 2011-09-12 13:57:37 +0200 | [diff] [blame] | 409 | } |
Andreas Färber | 99df7dc | 2013-08-26 05:15:23 +0200 | [diff] [blame] | 410 | return cpu->can_do_io != 0; |
Paolo Bonzini | 946fb27 | 2011-09-12 13:57:37 +0200 | [diff] [blame] | 411 | } |
| 412 | |
aliguori | 875cdcf | 2008-10-23 13:52:00 +0000 | [diff] [blame] | 413 | #endif |