/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/qemu-print.h"
#include "cpu.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "sysemu/qtest.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/tb-hash.h"
#include "exec/tb-lookup.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"

/* -icount align implementation. */

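/*
 * Per-cpu_exec() state for -icount align: diff_clk tracks how far the
 * guest clock has run ahead of (positive) or behind (negative) the host
 * clock, in ns; last_cpu_icount is the instruction count at the last
 * alignment; realtime_clock caches the host clock reference.
 */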
typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;

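/*
 * Convert the instructions executed since the last alignment into ns,
 * fold them into diff_clk, and sleep the host thread whenever the guest
 * has run more than VM_CLOCK_ADVANCE ahead of the host clock.
 */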
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += cpu_icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

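/*
 * Warn when the guest has fallen further behind the host; rate-limited
 * to one message per MAX_DELAY_PRINT_RATE ns and at most MAX_NB_PRINTS
 * messages in total.
 */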
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

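/*
 * Record the initial guest/host clock offset for this cpu_exec() call
 * and update the max_delay/max_advance statistics reported by
 * dump_drift_info().
 */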
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* CONFIG_USER_ONLY */

/* Execute a TB, and fix up the CPU state afterwards if necessary */
static inline tcg_target_ulong cpu_tb_exec(CPUState *cpu, TranslationBlock *itb)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    int tb_exit;
    uint8_t *tb_ptr = itb->tc.ptr;

    qemu_log_mask_and_addr(CPU_LOG_EXEC, itb->pc,
                           "Trace %d: %p ["
                           TARGET_FMT_lx "/" TARGET_FMT_lx "/%#x] %s\n",
                           cpu->cpu_index, itb->tc.ptr,
                           itb->cs_base, itb->pc, itb->flags,
                           lookup_symbol(itb->pc));

#if defined(DEBUG_DISAS)
    if (qemu_loglevel_mask(CPU_LOG_TB_CPU)
        && qemu_log_in_addr_range(itb->pc)) {
        FILE *logfile = qemu_log_lock();
        int flags = 0;
        if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
            flags |= CPU_DUMP_FPU;
        }
#if defined(TARGET_I386)
        flags |= CPU_DUMP_CCOP;
#endif
        log_cpu_state(cpu, flags);
        qemu_log_unlock(logfile);
    }
#endif /* DEBUG_DISAS */

    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    last_tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    tb_exit = ret & TB_EXIT_MASK;
    trace_exec_tb_exit(last_tb, tb_exit);

    if (tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->synchronize_from_tb) {
            cc->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }
    return ret;
}

#ifndef CONFIG_USER_ONLY
/* Execute the code without caching the generated code. An interpreter
   could be used if available. */
static void cpu_exec_nocache(CPUState *cpu, int max_cycles,
                             TranslationBlock *orig_tb, bool ignore_icount)
{
    TranslationBlock *tb;
    uint32_t cflags = curr_cflags() | CF_NOCACHE;

    if (ignore_icount) {
        cflags &= ~CF_USE_ICOUNT;
    }

    /* Should never happen.
       We only end up here when an existing TB is too long. */
    cflags |= MIN(max_cycles, CF_COUNT_MASK);

    mmap_lock();
    tb = tb_gen_code(cpu, orig_tb->pc, orig_tb->cs_base,
                     orig_tb->flags, cflags);
    tb->orig_tb = orig_tb;
    mmap_unlock();

    /* execute the generated code */
    trace_exec_tb_nocache(tb, tb->pc);
    cpu_tb_exec(cpu, tb);

    mmap_lock();
    tb_phys_invalidate(tb, -1);
    mmap_unlock();
    tcg_tb_remove(tb);
}
#endif

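/*
 * Execute one guest instruction (cflags = 1) inside an exclusive region
 * with parallel_cpus temporarily cleared, so an atomic operation that
 * cannot be translated for parallel execution runs serialized against
 * all other vCPUs.
 */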
void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;
    uint32_t cflags = 1;
    uint32_t cf_mask = cflags & CF_HASH_MASK;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();

        tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        /* Since we got here, we know that parallel_cpus must be true. */
        parallel_cpus = false;
        cc->cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb);
        cc->cpu_exec_exit(cpu);
    } else {
        /*
         * The mmap_lock is dropped by tb_gen_code if it runs out of
         * memory.
         */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    parallel_cpus = true;
    end_exclusive();
}

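/* Lookup key matched against candidate TBs in tb_htable_lookup(). */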
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t phys_page1;
    uint32_t flags;
    uint32_t cf_mask;
    uint32_t trace_vcpu_dstate;
};

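/*
 * qht comparison callback: return true if TB @p matches the lookup key
 * @d, checking the physical address of the second page for TBs that
 * span two guest pages.
 */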
static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->phys_page1 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        (tb_cflags(tb) & (CF_HASH_MASK | CF_INVALID)) == desc->cf_mask) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page2;
            target_ulong virt_page2;

            virt_page2 = (desc->pc & TARGET_PAGE_MASK) + TARGET_PAGE_SIZE;
            phys_page2 = get_page_addr_code(desc->env, virt_page2);
            if (tb->page_addr[1] == phys_page2) {
                return true;
            }
        }
    }
    return false;
}

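/*
 * Slow-path lookup: find a TB in the global hash table, keyed by the
 * physical address of the code. Returns NULL when get_page_addr_code()
 * cannot translate the PC (phys_pc == -1).
 */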
TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                   target_ulong cs_base, uint32_t flags,
                                   uint32_t cf_mask)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = (CPUArchState *)cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cf_mask = cf_mask;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.phys_page1 = phys_pc & TARGET_PAGE_MASK;
    h = tb_hash_func(phys_pc, pc, flags, cf_mask, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

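/*
 * Set the destination of jump slot @n in @tb: patch the generated code
 * in place when the TCG backend supports direct jumps, otherwise store
 * the address that the generated indirect jump will load.
 */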
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        tb_target_set_jmp_target(tc_ptr, tc_ptr + offset, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

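/*
 * Chain exit @n of @tb directly to @tb_next, under tb_next->jmp_lock.
 * The slot is claimed with an atomic cmpxchg so only one thread patches
 * it, and nothing is done if tb_next has already been invalidated.
 */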
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

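/*
 * Find the TB matching the current CPU state, translating it if it is
 * not already cached, and try to chain it to exit @tb_exit of @last_tb
 * so the next execution can skip the lookup entirely.
 */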
static inline TranslationBlock *tb_find(CPUState *cpu,
                                        TranslationBlock *last_tb,
                                        int tb_exit, uint32_t cf_mask)
{
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags;

    tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
    if (tb == NULL) {
        mmap_lock();
        tb = tb_gen_code(cpu, pc, cs_base, flags, cf_mask);
        mmap_unlock();
        /* We add the TB in the virtual pc hash table for the fast lookup */
        qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
    }
#ifndef CONFIG_USER_ONLY
    /* We don't take care of direct jumps when address mapping changes in
     * system emulation. So it's not safe to make a direct jump to a TB
     * spanning two pages because the mapping for the second page can change.
     */
    if (tb->page_addr[1] != -1) {
        last_tb = NULL;
    }
#endif
    /* See if we can patch the calling TB. */
    if (last_tb) {
        tb_add_jump(last_tb, tb_exit, tb);
    }
    return tb;
}

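/*
 * Return true if the CPU is halted and should stay halted (no pending
 * work); on x86 system emulation, service a pending APIC poll first so
 * an interrupt can wake the CPU up.
 */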
static inline bool cpu_handle_halt(CPUState *cpu)
{
    if (cpu->halted) {
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
        if ((cpu->interrupt_request & CPU_INTERRUPT_POLL)
            && replay_interrupt()) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }

    return false;
}

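/*
 * Clear any stale watchpoint-hit flags, then let the target's
 * debug_excp_handler hook inspect the debug exception.
 */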
static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    cc->debug_excp_handler(cpu);
}

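/*
 * Process a pending exception, if any. Returns true when cpu_exec()
 * should leave the execution loop with *ret set; returns false when
 * execution can continue with the next TB.
 */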
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* try to cause an exception pending in the log */
            cpu_exec_nocache(cpu, 1, tb_find(cpu, NULL, 0, curr_cflags()), true);
        }
#endif
        if (cpu->exception_index < 0) {
            return false;
        }
    }

    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* if user mode only, we simulate a fake exception
           which will be handled outside the cpu execution
           loop */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->do_interrupt(cpu);
#endif
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

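/*
 * Service pending interrupt and exit requests. Returns true when the
 * inner execution loop should stop and control should return to
 * cpu_handle_exception(); *last_tb is cleared whenever TB chaining is
 * no longer safe because the program flow changed.
 */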
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif
        /* The target hook has 3 exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           and a longjmp through cpu_loop_exit. */
        else {
            if (cc->cpu_exec_interrupt(cpu, interrupt_request)) {
                replay_interrupt();
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                cpu->exception_index =
                    (cpu->singlestep_enabled ? EXCP_DEBUG : -1);
                *last_tb = NULL;
            }
            /* The target hook may have updated 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

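/*
 * Execute one TB. On TB_EXIT_REQUESTED, either return to the main loop
 * (something asked us to stop) or, with icount, refill the decrementer
 * from the remaining budget and run any leftover instructions uncached.
 */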
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    uintptr_t ret;
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    ret = cpu_tb_exec(cpu, tb);
    tb = (TranslationBlock *)(ret & ~TB_EXIT_MASK);
    *tb_exit = ret & TB_EXIT_MASK;
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired. */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    cpu_update_icount(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;
    if (!cpu->icount_extra) {
        /* Execute any remaining instructions, then let the main loop
         * handle the next event.
         */
        if (insns_left > 0) {
            cpu_exec_nocache(cpu, insns_left, tb, false);
        }
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cc->cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__) || !QEMU_GNUC_PREREQ(4, 6)
        /* Some compilers wrongly smash all local variables after
         * siglongjmp. There were bug reports for gcc 4.5.0 and clang.
         * Reload essential local variables here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered). */
        cpu = current_cpu;
        cc = CPU_GET_CLASS(cpu);
#else /* buggy compiler */
        /* Assert that the compiler does not smash local variables. */
        g_assert(cpu == current_cpu);
        g_assert(cc == CPU_GET_CLASS(cpu));
#endif /* buggy compiler */
#ifndef CONFIG_SOFTMMU
        tcg_debug_assert(!have_mmap_lock());
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            uint32_t cflags = cpu->cflags_next_tb;
            TranslationBlock *tb;

            /* When requested, use an exact setting for cflags for the next
               execution. This is used for icount, precise smc, and stop-
               after-access watchpoints. Since this request should never
               have CF_INVALID set, -1 is a convenient invalid value that
               does not require tcg headers for cpu_common_reset. */
            if (cflags == -1) {
                cflags = curr_cflags();
            } else {
                cpu->cflags_next_tb = -1;
            }

            tb = tb_find(cpu, last_tb, tb_exit, cflags);
            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);
            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cc->cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

#ifndef CONFIG_USER_ONLY

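/* Report guest/host clock drift and the -icount align statistics. */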
void dump_drift_info(void)
{
    if (!icount_enabled()) {
        return;
    }

    qemu_printf("Host - Guest clock %"PRIi64" ms\n",
                (cpu_get_clock() - cpu_get_icount()) / SCALE_MS);
    if (icount_align_option) {
        qemu_printf("Max guest delay %"PRIi64" ms\n",
                    -max_delay / SCALE_MS);
        qemu_printf("Max guest advance %"PRIi64" ms\n",
                    max_advance / SCALE_MS);
    } else {
        qemu_printf("Max guest delay NA\n");
        qemu_printf("Max guest advance NA\n");
    }
}

#endif /* !CONFIG_USER_ONLY */