/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to run at most 3 ms ahead of real time.
 * The difference between the two clocks therefore
 * oscillates around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;

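/*
 * Throttle execution when -icount align is on: if the guest clock has
 * run more than VM_CLOCK_ADVANCE ahead of the host clock, sleep on the
 * host until the two are back in step.  We can only slow the guest
 * down, never speed it up; a late guest only triggers a warning
 * (see print_delay below).
 */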
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

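/*
 * Rate-limited warning for a late guest: print at most once every
 * MAX_DELAY_PRINT_RATE ns and MAX_NB_PRINTS times in total, and only
 * when the delay moves out of the previously reported band.
 */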
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

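/*
 * Capture the initial guest/host clock difference for this execution
 * slice and track the worst delay and advance seen so far, which
 * dump_drift_info reports.
 */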
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long.  */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

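/*
 * Execute exactly one guest instruction inside the exclusive region:
 * cflags = 1 limits the TB to a single insn, start_exclusive() parks
 * all other vCPUs, and parallel_cpus is cleared so the translator can
 * emit non-atomic host code for the guest atomic operation.
 */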
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();

        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* Since we got here, we know that parallel_cpus must be true.  */
        parallel_cpus = false;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    parallel_cpus = true;
    end_exclusive();
}

struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

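/*
 * QHT comparison callback: check that the candidate TB matches every
 * field of the lookup key, including the physical address of a second
 * guest page if the TB spans two pages.
 */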
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

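/*
 * Slow-path TB lookup: hash the (phys_pc, pc, flags, cf_mask, trace
 * state) tuple and search the global qht; bail out with NULL when
 * get_page_addr_code() cannot resolve a code page for 'pc'.
 */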
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

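/*
 * Redirect goto_tb jump slot 'n' of 'tb' to 'addr': patch the
 * generated host code in place on direct-jump backends, or store the
 * destination for an indirect load-and-jump otherwise.
 */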
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

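/*
 * Chain 'tb' to 'tb_next': claim the jump slot with a cmpxchg, patch
 * the native jump, and record the link under tb_next's jmp_lock so it
 * can be undone if tb_next is invalidated.
 */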
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

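/*
 * Look up the TB for the current CPU state, translating it if it is
 * not already cached, and chain it to 'last_tb' where direct linking
 * is safe.
 */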
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

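/*
 * Return true to keep the CPU halted and leave cpu_exec; as a special
 * case, a halted x86 CPU must still poll the APIC for interrupts that
 * can wake it up.
 */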
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

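/*
 * On EXCP_DEBUG, clear stale watchpoint-hit flags before invoking the
 * per-target debug exception hook.
 */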
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

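/*
 * Process a pending exception, if any.  Returns true, with *ret set,
 * when cpu_exec should return to its caller (e.g. EXCP_DEBUG,
 * EXCP_HALTED, or a user-mode exception handled outside the loop).
 */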
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute one insn to raise the exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* In user-mode emulation, we raise a fake exception
           which is handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has three exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           or it may exit via longjmp through cpu_loop_exit. */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

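/*
 * Execute one TB.  *last_tb is set for chaining unless the TB exited
 * with TB_EXIT_REQUESTED; when the icount slice has run out, refill
 * the decrementer and, if only a few instructions remain, execute
 * them with an uncached TB.
 */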
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired.  */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution.  */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate the difference between guest and host clocks.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution. This is used for icount, precise smc, and stop-
               after-access watchpoints. Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset. */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is ahead */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

#ifndef CONFIG_USER_ONLY

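/*
 * Report how far the guest (icount) clock has drifted from the host
 * clock, plus the worst delay/advance recorded while -icount align
 * was active.
 */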
void dump_drift_info(void)
{
    if (!icount_enabled()) {
        return;
    }

    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount()) / SCALE_MS);
    if (icount_align_option) {
        qemu_printf("Max guest delay %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        qemu_printf("Max guest advance %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        qemu_printf("Max guest delay NA\n");
        qemu_printf("Max guest advance NA\n");
    }
}

#endif /* !CONFIG_USER_ONLY */