/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/compiler.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;

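/*
 * align_clocks: if the guest's virtual clock (icount) has run ahead of
 * real time by more than VM_CLOCK_ADVANCE, put the host thread to sleep
 * until the two clocks are roughly back in sync.  Only active when
 * -icount align is enabled (icount_align_option).
 */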
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

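/*
 * init_delay_params: capture the current difference between the guest
 * virtual clock and the host clock at the start of cpu_exec(), and keep
 * the worst-case values seen so far in max_delay/max_advance.
 */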
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

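/*
 * curr_cflags: compute the cflags that a TB executed by this vCPU must
 * currently have.  gdb single-step, the global -singlestep option and
 * -d nochain all suppress direct TB-to-TB chaining (goto_tb); the first
 * two also limit each TB to a single guest instruction.
 */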
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step.  We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (singlestep) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

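/*
 * tb_desc describes the CPU state we are looking for in the global
 * TB hash table; tb_lookup_cmp() below compares a candidate TB against
 * it, including the physical address of a possible second guest page
 * spanned by the TB.
 */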
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            target_ulong virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second pages do not match, and therefore the resulting insn
             * is different for the new TB.  Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb->page_addr[1] == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

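/*
 * tb_htable_lookup: slow-path lookup in the global QHT hash table
 * (tb_ctx.htable), keyed on the physical PC plus the CPU state captured
 * in tb_desc.  Returns NULL if the code page is unmapped or no matching
 * TB has been translated yet.
 */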
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags, uint32_t cflags)
{
    TranslationBlock *tb;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);

    if (likely(tb &&
               tb->pc == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
               tb_cflags(tb) == cflags)) {
        return tb;
    }
    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }
    qatomic_set(&cpu->tb_jmp_cache[hash], tb);
    return tb;
}

static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
                                const TranslationBlock *tb)
{
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
        && qemu_log_in_addr_range(pc)) {

        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [" TARGET_FMT_lx
                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

#if defined(DEBUG_DISAS)
        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
#endif /* DEBUG_DISAS */
    }
}

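/*
 * check_for_breakpoints: returns true if a breakpoint at @pc fired, in
 * which case cpu->exception_index has been set to EXCP_DEBUG.  If the
 * page merely contains a breakpoint at another address, *cflags is
 * adjusted so that the next TB executes only a single instruction.
 */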
static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                  uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
        return false;
    }

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page.  Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer.  If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    log_cpu_exec(pc, cpu, tb);

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call.  Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does not
 * reduce the impact of CFI in environments with high security requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    log_cpu_exec(itb->pc, cpu, itb);

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB.  The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception.  Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}


static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

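/*
 * cpu_exec_step_atomic: execute exactly one guest instruction inside the
 * exclusive region, so that an atomic operation the TCG backend cannot
 * handle directly is serialized against all other vCPUs.  The TB is
 * generated without CF_PARALLEL and with an instruction count of 1.
 */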
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

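/*
 * tb_set_jmp_target: point outgoing jump @n of @tb at @addr.  With
 * direct jumps the generated host code is patched in place, through the
 * writable alias of the split-wx code buffer; otherwise the new address
 * is simply stored in jmp_target_arg[n].
 */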
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        uintptr_t jmp_rx = tc_ptr + offset;
        uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
        tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

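/*
 * tb_add_jump: chain exit @n of @tb directly to @tb_next, so execution
 * can flow from one TB to the next without returning to the main loop.
 * The destination's jmp_lock protects against concurrent invalidation;
 * only the first caller claims the jump slot.
 */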
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

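/*
 * cpu_handle_halt: returns true if the vCPU is halted and has no work,
 * in which case cpu_exec() bails out with EXCP_HALTED.  On x86 system
 * emulation the APIC is polled for a pending interrupt first, which may
 * make the CPU runnable again.
 */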
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

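/*
 * cpu_handle_exception: process a pending cpu->exception_index, if any.
 * Returns true (with *ret set) when cpu_exec() should leave its main
 * loop, either because the exception is really an exit request
 * (EXCP_INTERRUPT, EXCP_DEBUG, ...) or, in user mode, because the
 * exception must be handled by the caller's cpu loop.
 */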
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* In user-mode emulation we raise a fake exception here;
           it is handled by the caller, outside of the cpu
           execution loop. */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later.  It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

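/*
 * cpu_handle_interrupt: service pending interrupt and exit requests.
 * Returns true when cpu_exec() must go back around its outer loop (or
 * leave it entirely); returns false, possibly clearing *last_tb, when
 * normal TB execution can continue.
 */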
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           False when the interrupt isn't processed,
           True when it is, and we should restart on a new TB,
           and by longjmp via cpu_loop_exit. */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

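/*
 * cpu_loop_exec_tb: execute one TB and handle an icount budget that
 * expired mid-chain: global icount is brought up to date, the
 * decrementer is refilled, and cflags_next_tb is set so that the next
 * TB is sized to the remaining instruction budget.
 */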
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt.  cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 897 | /* main execution loop */ |
| 898 | |
Peter Crosthwaite | ea3e984 | 2015-06-18 10:24:55 -0700 | [diff] [blame] | 899 | int cpu_exec(CPUState *cpu) |
bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 900 | { |
Sergey Fedorov | c385e6e | 2016-05-11 13:21:49 +0300 | [diff] [blame] | 901 | int ret; |
Pavel Dovgalyuk | cfb2d02 | 2017-02-07 09:54:57 +0300 | [diff] [blame] | 902 | SyncClocks sc = { 0 }; |
Sebastian Tanase | c2aa5f8 | 2014-07-25 11:56:31 +0200 | [diff] [blame] | 903 | |
Pavel Dovgalyuk | 6f06096 | 2015-09-17 19:24:16 +0300 | [diff] [blame] | 904 | /* replay_interrupt may need current_cpu */ |
| 905 | current_cpu = cpu; |
| 906 | |
Sergey Fedorov | 8b2d34e | 2016-05-11 13:21:47 +0300 | [diff] [blame] | 907 | if (cpu_handle_halt(cpu)) { |
| 908 | return EXCP_HALTED; |
Paolo Bonzini | eda48c3 | 2011-03-12 17:43:56 +0100 | [diff] [blame] | 909 | } |
bellard | 5a1e3cf | 2005-11-23 21:02:53 +0000 | [diff] [blame] | 910 | |
Paolo Bonzini | 79e2b9a | 2015-01-21 12:09:14 +0100 | [diff] [blame] | 911 | rcu_read_lock(); |
| 912 | |
Eduardo Habkost | 035ba06 | 2020-12-12 16:55:16 +0100 | [diff] [blame] | 913 | cpu_exec_enter(cpu); |
bellard | 9d27abd | 2003-05-10 13:13:54 +0000 | [diff] [blame] | 914 | |
Sebastian Tanase | c2aa5f8 | 2014-07-25 11:56:31 +0200 | [diff] [blame] | 915 | /* Calculate the difference between the guest clock and the host clock. |
 | 916 |  * This delay includes the delay of the last cycle, so |
 | 917 |  * what we have to do is sleep until it is 0.  Any advance or |
 | 918 |  * delay we gain here we try to correct on the next iteration. |
| 919 | */ |
| 920 | init_delay_params(&sc, cpu); |
| 921 | |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 922 | /* prepare setjmp context for exception handling */ |
| 923 | if (sigsetjmp(cpu->jmp_env, 0) != 0) { |
Philippe Mathieu-Daudé | 19a8431 | 2020-12-10 17:47:41 +0400 | [diff] [blame] | 924 | #if defined(__clang__) |
Peter Maydell | e6a41a0 | 2021-01-29 13:03:30 +0000 | [diff] [blame] | 925 | /* |
| 926 | * Some compilers wrongly smash all local variables after |
| 927 | * siglongjmp (the spec requires that only non-volatile locals |
| 928 | * which are changed between the sigsetjmp and siglongjmp are |
| 929 | * permitted to be trashed). There were bug reports for gcc |
| 930 | * 4.5.0 and clang. The bug is fixed in all versions of gcc |
| 931 | * that we support, but is still unfixed in clang: |
| 932 | * https://bugs.llvm.org/show_bug.cgi?id=21183 |
| 933 | * |
Richard Henderson | 2521c77 | 2021-07-12 18:29:34 +0000 | [diff] [blame] | 934 | * Reload an essential local variable here for those compilers. |
Peter Maydell | e6a41a0 | 2021-01-29 13:03:30 +0000 | [diff] [blame] | 935 | * Newer versions of gcc would complain about this code (-Wclobbered), |
| 936 | * so we only perform the workaround for clang. |
| 937 | */ |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 938 | cpu = current_cpu; |
Peter Maydell | e6a41a0 | 2021-01-29 13:03:30 +0000 | [diff] [blame] | 939 | #else |
Richard Henderson | 2521c77 | 2021-07-12 18:29:34 +0000 | [diff] [blame] | 940 | /* Non-buggy compilers preserve this; assert the correct value. */ |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 941 | g_assert(cpu == current_cpu); |
Peter Maydell | e6a41a0 | 2021-01-29 13:03:30 +0000 | [diff] [blame] | 942 | #endif |
| 943 | |
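 | | /* |
 | |  * We arrive here via siglongjmp from cpu_loop_exit() and friends; |
 | |  * drop any locks and per-TB state that were still held when the |
 | |  * translation or execution was interrupted. |
 | |  */ |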
Emilio G. Cota | 0ac2031 | 2017-08-04 23:46:31 -0400 | [diff] [blame] | 944 | #ifndef CONFIG_SOFTMMU |
Richard Henderson | f920ffd | 2021-09-13 13:01:07 -0700 | [diff] [blame] | 945 | clear_helper_retaddr(); |
Richard Henderson | 297368c | 2022-08-10 17:14:26 -0700 | [diff] [blame] | 946 | if (have_mmap_lock()) { |
| 947 | mmap_unlock(); |
| 948 | } |
Emilio G. Cota | 0ac2031 | 2017-08-04 23:46:31 -0400 | [diff] [blame] | 949 | #endif |
Jan Kiszka | 8d04fb5 | 2017-02-23 18:29:11 +0000 | [diff] [blame] | 950 | if (qemu_mutex_iothread_locked()) { |
| 951 | qemu_mutex_unlock_iothread(); |
| 952 | } |
Emilio G. Cota | e6d86be | 2018-10-21 13:24:26 -0400 | [diff] [blame] | 953 | qemu_plugin_disable_mem_helpers(cpu); |
| 954 | |
Emilio G. Cota | 8fd3a9b | 2019-01-15 14:47:53 -0500 | [diff] [blame] | 955 | assert_no_pages_locked(); |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 956 | } |
| 957 | |
 | 958 | /* If an exception is pending, it is handled here before we run any TBs. */ |
| 959 | while (!cpu_handle_exception(cpu, &ret)) { |
| 960 | TranslationBlock *last_tb = NULL; |
| 961 | int tb_exit = 0; |
| 962 | |
| 963 | while (!cpu_handle_interrupt(cpu, &last_tb)) { |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 964 | TranslationBlock *tb; |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 965 | target_ulong cs_base, pc; |
| 966 | uint32_t flags, cflags; |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 967 | |
Richard Henderson | 10c3782 | 2021-07-19 09:03:21 -1000 | [diff] [blame] | 968 | cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags); |
| 969 | |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 970 | /* |
| 971 | * When requested, use an exact setting for cflags for the next |
| 972 | * execution. This is used for icount, precise smc, and stop- |
| 973 | * after-access watchpoints. Since this request should never |
| 974 | * have CF_INVALID set, -1 is a convenient invalid value that |
| 975 | * does not require tcg headers for cpu_common_reset. |
| 976 | */ |
| 977 | cflags = cpu->cflags_next_tb; |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 978 | if (cflags == -1) { |
Alex Bennée | c0ae396 | 2021-02-24 16:58:08 +0000 | [diff] [blame] | 979 | cflags = curr_cflags(cpu); |
Richard Henderson | 9b990ee | 2017-10-13 10:50:02 -0700 | [diff] [blame] | 980 | } else { |
| 981 | cpu->cflags_next_tb = -1; |
| 982 | } |
| 983 | |
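 | | /* |
 | |  * check_for_breakpoints may tighten cflags (e.g. to force a |
 | |  * shorter TB) or, when a breakpoint fires at this pc, raise a |
 | |  * debug exception and return true so that we break out and let |
 | |  * cpu_handle_exception deal with it. |
 | |  */ |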
Richard Henderson | 10c3782 | 2021-07-19 09:03:21 -1000 | [diff] [blame] | 984 | if (check_for_breakpoints(cpu, pc, &cflags)) { |
| 985 | break; |
| 986 | } |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 987 | |
| 988 | tb = tb_lookup(cpu, pc, cs_base, flags, cflags); |
| 989 | if (tb == NULL) { |
| 990 | mmap_lock(); |
| 991 | tb = tb_gen_code(cpu, pc, cs_base, flags, cflags); |
| 992 | mmap_unlock(); |
| 993 | /* |
 | 994 |  * Add the new TB to the virtual-pc hash table so that the |
 | 995 |  * next lookup of this pc takes the fast path. |
| 996 | */ |
| 997 | qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb); |
| 998 | } |
| 999 | |
| 1000 | #ifndef CONFIG_USER_ONLY |
| 1001 | /* |
 | 1002 |  * Direct jumps are not updated when the address mapping changes |
 | 1003 |  * in system emulation, so it is not safe to chain directly to a |
 | 1004 |  * TB that spans two pages: the mapping of the second page can |
 | 1005 |  * change. |
| 1006 | */ |
| 1007 | if (tb->page_addr[1] != -1) { |
| 1008 | last_tb = NULL; |
| 1009 | } |
| 1010 | #endif |
| 1011 | /* See if we can patch the calling TB. */ |
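 | | /* |
 | |  * tb_exit records which of last_tb's jump slots was taken, so the |
 | |  * matching exit is patched to branch straight into the new TB and |
 | |  * future executions skip the lookup above entirely. |
 | |  */ |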
| 1012 | if (last_tb) { |
| 1013 | tb_add_jump(last_tb, tb_exit, tb); |
| 1014 | } |
| 1015 | |
Pavel Dovgalyuk | cfb2d02 | 2017-02-07 09:54:57 +0300 | [diff] [blame] | 1016 | cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit); |
Richard Henderson | 11c1d5f | 2021-07-19 12:40:57 -1000 | [diff] [blame] | 1017 | |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 1018 | /* Try to align the host and virtual clocks |
 | 1019 |  * if the guest is running ahead of the host. */ |
| 1020 | align_clocks(&sc, cpu); |
bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 1021 | } |
Paolo Bonzini | 4515e58 | 2017-01-29 10:55:14 +0100 | [diff] [blame] | 1022 | } |
bellard | 3fb2ded | 2003-06-24 13:22:59 +0000 | [diff] [blame] | 1023 | |
Eduardo Habkost | 035ba06 | 2020-12-12 16:55:16 +0100 | [diff] [blame] | 1024 | cpu_exec_exit(cpu); |
Paolo Bonzini | 79e2b9a | 2015-01-21 12:09:14 +0100 | [diff] [blame] | 1025 | rcu_read_unlock(); |
pbrook | 1057eaa | 2007-02-04 13:37:44 +0000 | [diff] [blame] | 1026 | |
bellard | 7d13299 | 2003-03-06 23:23:54 +0000 | [diff] [blame] | 1027 | return ret; |
| 1028 | } |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1029 | |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1030 | void tcg_exec_realizefn(CPUState *cpu, Error **errp) |
| 1031 | { |
| 1032 | static bool tcg_target_initialized; |
| 1033 | CPUClass *cc = CPU_GET_CLASS(cpu); |
| 1034 | |
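 | | /* The TCG code generator is global state: initialize it only once, |
 | |  * on the first vCPU to be realized. */ |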
| 1035 | if (!tcg_target_initialized) { |
Claudio Fontana | 7827168 | 2021-02-04 17:39:23 +0100 | [diff] [blame] | 1036 | cc->tcg_ops->initialize(); |
Claudio Fontana | 7df5e3d | 2021-02-04 17:39:11 +0100 | [diff] [blame] | 1037 | tcg_target_initialized = true; |
| 1038 | } |
| 1039 | tlb_init(cpu); |
| 1040 | qemu_plugin_vcpu_init_hook(cpu); |
| 1041 | |
| 1042 | #ifndef CONFIG_USER_ONLY |
| 1043 | tcg_iommu_init_notifier_list(cpu); |
| 1044 | #endif /* !CONFIG_USER_ONLY */ |
| 1045 | } |
| 1046 | |
 | 1047 | /* Undo the initializations of tcg_exec_realizefn(), in reverse order. */ |
| 1048 | void tcg_exec_unrealizefn(CPUState *cpu) |
| 1049 | { |
| 1050 | #ifndef CONFIG_USER_ONLY |
| 1051 | tcg_iommu_free_notifier_list(cpu); |
| 1052 | #endif /* !CONFIG_USER_ONLY */ |
| 1053 | |
| 1054 | qemu_plugin_vcpu_exit_hook(cpu); |
| 1055 | tlb_destroy(cpu); |
| 1056 | } |
| 1057 | |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1058 | #ifndef CONFIG_USER_ONLY |
| 1059 | |
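 | | /* Report how far the guest (icount) clock has drifted from the host clock. */ |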
Bernhard Beschow | 7112ffd | 2022-05-20 20:01:07 +0200 | [diff] [blame] | 1060 | static void dump_drift_info(GString *buf) |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1061 | { |
| 1062 | if (!icount_enabled()) { |
| 1063 | return; |
| 1064 | } |
| 1065 | |
Daniel P. Berrangé | 3a841ab | 2021-09-08 10:35:43 +0100 | [diff] [blame] | 1066 | g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n", |
| 1067 | (cpu_get_clock() - icount_get()) / SCALE_MS); |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1068 | if (icount_align_option) { |
Daniel P. Berrangé | 3a841ab | 2021-09-08 10:35:43 +0100 | [diff] [blame] | 1069 | g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n", |
| 1070 | -max_delay / SCALE_MS); |
| 1071 | g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n", |
| 1072 | max_advance / SCALE_MS); |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1073 | } else { |
Daniel P. Berrangé | 3a841ab | 2021-09-08 10:35:43 +0100 | [diff] [blame] | 1074 | g_string_append_printf(buf, "Max guest delay NA\n"); |
| 1075 | g_string_append_printf(buf, "Max guest advance NA\n"); |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1076 | } |
| 1077 | } |
| 1078 | |
Daniel P. Berrangé | 3a841ab | 2021-09-08 10:35:43 +0100 | [diff] [blame] | 1079 | HumanReadableText *qmp_x_query_jit(Error **errp) |
| 1080 | { |
| 1081 | g_autoptr(GString) buf = g_string_new(""); |
| 1082 | |
| 1083 | if (!tcg_enabled()) { |
| 1084 | error_setg(errp, "JIT information is only available with accel=tcg"); |
| 1085 | return NULL; |
| 1086 | } |
| 1087 | |
| 1088 | dump_exec_info(buf); |
| 1089 | dump_drift_info(buf); |
| 1090 | |
| 1091 | return human_readable_text_from_str(buf); |
| 1092 | } |
| 1093 | |
Daniel P. Berrangé | b6a7f3e | 2021-09-08 10:35:43 +0100 | [diff] [blame] | 1094 | HumanReadableText *qmp_x_query_opcount(Error **errp) |
| 1095 | { |
| 1096 | g_autoptr(GString) buf = g_string_new(""); |
| 1097 | |
| 1098 | if (!tcg_enabled()) { |
| 1099 | error_setg(errp, "Opcode count information is only available with accel=tcg"); |
| 1100 | return NULL; |
| 1101 | } |
| 1102 | |
Bernhard Beschow | b01841f | 2022-05-20 20:01:08 +0200 | [diff] [blame] | 1103 | tcg_dump_op_count(buf); |
Daniel P. Berrangé | b6a7f3e | 2021-09-08 10:35:43 +0100 | [diff] [blame] | 1104 | |
| 1105 | return human_readable_text_from_str(buf); |
| 1106 | } |
| 1107 | |
Alex Bennée | 92e28c0 | 2022-01-05 13:49:57 +0000 | [diff] [blame] | 1108 | #ifdef CONFIG_PROFILER |
| 1109 | |
| 1110 | int64_t dev_time; |
| 1111 | |
| 1112 | HumanReadableText *qmp_x_query_profile(Error **errp) |
| 1113 | { |
| 1114 | g_autoptr(GString) buf = g_string_new(""); |
| 1115 | static int64_t last_cpu_exec_time; |
| 1116 | int64_t cpu_exec_time; |
| 1117 | int64_t delta; |
| 1118 | |
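 | | /* |
 | |  * Report the deltas since the previous query: cpu_exec_time is |
 | |  * compared against last_cpu_exec_time, and dev_time is zeroed |
 | |  * once it has been reported. |
 | |  */ |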
| 1119 | cpu_exec_time = tcg_cpu_exec_time(); |
| 1120 | delta = cpu_exec_time - last_cpu_exec_time; |
| 1121 | |
| 1122 | g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n", |
| 1123 | dev_time, dev_time / (double)NANOSECONDS_PER_SECOND); |
| 1124 | g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n", |
| 1125 | delta, delta / (double)NANOSECONDS_PER_SECOND); |
| 1126 | last_cpu_exec_time = cpu_exec_time; |
| 1127 | dev_time = 0; |
| 1128 | |
| 1129 | return human_readable_text_from_str(buf); |
| 1130 | } |
| 1131 | #else |
| 1132 | HumanReadableText *qmp_x_query_profile(Error **errp) |
| 1133 | { |
| 1134 | error_setg(errp, "Internal profiler not compiled"); |
| 1135 | return NULL; |
| 1136 | } |
| 1137 | #endif |
| 1138 | |
Claudio Fontana | 740b175 | 2020-08-19 13:17:19 +0200 | [diff] [blame] | 1139 | #endif /* !CONFIG_USER_ONLY */ |