/*
 * emulator main execution loop
 *
 * Copyright (c) 2003-2005 Fabrice Bellard
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, see <http://www.gnu.org/licenses/>.
 */

#include "qemu/osdep.h"
#include "qemu/qemu-print.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/type-helpers.h"
#include "hw/core/tcg-cpu-ops.h"
#include "trace.h"
#include "disas/disas.h"
#include "exec/exec-all.h"
#include "tcg/tcg.h"
#include "qemu/atomic.h"
#include "qemu/compiler.h"
#include "qemu/timer.h"
#include "qemu/rcu.h"
#include "exec/log.h"
#include "qemu/main-loop.h"
#if defined(TARGET_I386) && !defined(CONFIG_USER_ONLY)
#include "hw/i386/apic.h"
#endif
#include "sysemu/cpus.h"
#include "exec/cpu-all.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/replay.h"
#include "sysemu/tcg.h"
#include "exec/helper-proto.h"
#include "tb-hash.h"
#include "tb-context.h"
#include "internal.h"

/* -icount align implementation. */

typedef struct SyncClocks {
    int64_t diff_clk;
    int64_t last_cpu_icount;
    int64_t realtime_clock;
} SyncClocks;

#if !defined(CONFIG_USER_ONLY)
/* Allow the guest to have a max 3ms advance.
 * The difference between the 2 clocks could therefore
 * oscillate around 0.
 */
#define VM_CLOCK_ADVANCE 3000000
#define THRESHOLD_REDUCE 1.5
#define MAX_DELAY_PRINT_RATE 2000000000LL
#define MAX_NB_PRINTS 100

static int64_t max_delay;
static int64_t max_advance;

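/*
 * When -icount align is in use, sleep the host if the guest's virtual
 * clock has run ahead of real time, so the two clocks stay in step.
 */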
static void align_clocks(SyncClocks *sc, CPUState *cpu)
{
    int64_t cpu_icount;

    if (!icount_align_option) {
        return;
    }

    cpu_icount = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    sc->diff_clk += icount_to_ns(sc->last_cpu_icount - cpu_icount);
    sc->last_cpu_icount = cpu_icount;

    if (sc->diff_clk > VM_CLOCK_ADVANCE) {
#ifndef _WIN32
        struct timespec sleep_delay, rem_delay;
        sleep_delay.tv_sec = sc->diff_clk / 1000000000LL;
        sleep_delay.tv_nsec = sc->diff_clk % 1000000000LL;
        if (nanosleep(&sleep_delay, &rem_delay) < 0) {
            sc->diff_clk = rem_delay.tv_sec * 1000000000LL + rem_delay.tv_nsec;
        } else {
            sc->diff_clk = 0;
        }
#else
        Sleep(sc->diff_clk / SCALE_MS);
        sc->diff_clk = 0;
#endif
    }
}

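/*
 * Warn, at most MAX_NB_PRINTS times and no more than once every
 * MAX_DELAY_PRINT_RATE ns, when the guest falls behind real time.
 */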
static void print_delay(const SyncClocks *sc)
{
    static float threshold_delay;
    static int64_t last_realtime_clock;
    static int nb_prints;

    if (icount_align_option &&
        sc->realtime_clock - last_realtime_clock >= MAX_DELAY_PRINT_RATE &&
        nb_prints < MAX_NB_PRINTS) {
        if ((-sc->diff_clk / (float)1000000000LL > threshold_delay) ||
            (-sc->diff_clk / (float)1000000000LL <
             (threshold_delay - THRESHOLD_REDUCE))) {
            threshold_delay = (-sc->diff_clk / 1000000000LL) + 1;
            qemu_printf("Warning: The guest is now late by %.1f to %.1f seconds\n",
                        threshold_delay - 1,
                        threshold_delay);
            nb_prints++;
            last_realtime_clock = sc->realtime_clock;
        }
    }
}

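/*
 * Record the initial guest/host clock difference and update the
 * worst-case delay/advance statistics reported by dump_drift_info().
 */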
static void init_delay_params(SyncClocks *sc, CPUState *cpu)
{
    if (!icount_align_option) {
        return;
    }
    sc->realtime_clock = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT);
    sc->diff_clk = qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) - sc->realtime_clock;
    sc->last_cpu_icount
        = cpu->icount_extra + cpu_neg(cpu)->icount_decr.u16.low;
    if (sc->diff_clk < max_delay) {
        max_delay = sc->diff_clk;
    }
    if (sc->diff_clk > max_advance) {
        max_advance = sc->diff_clk;
    }

    /* Print every 2s max if the guest is late. We limit the number
       of printed messages to MAX_NB_PRINTS (currently 100). */
    print_delay(sc);
}
#else
static void align_clocks(SyncClocks *sc, const CPUState *cpu)
{
}

static void init_delay_params(SyncClocks *sc, const CPUState *cpu)
{
}
#endif /* !CONFIG_USER_ONLY */

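/*
 * Compute the CF_* flags to use when looking up or generating the next
 * TB, honouring gdb single-stepping, the -singlestep option and -d nochain.
 */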
uint32_t curr_cflags(CPUState *cpu)
{
    uint32_t cflags = cpu->tcg_cflags;

    /*
     * Record gdb single-step. We should be exiting the TB by raising
     * EXCP_DEBUG, but to simplify other tests, disable chaining too.
     *
     * For singlestep and -d nochain, suppress goto_tb so that
     * we can log -d cpu,exec after every TB.
     */
    if (unlikely(cpu->singlestep_enabled)) {
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | CF_SINGLE_STEP | 1;
    } else if (singlestep) {
        cflags |= CF_NO_GOTO_TB | 1;
    } else if (qemu_loglevel_mask(CPU_LOG_TB_NOCHAIN)) {
        cflags |= CF_NO_GOTO_TB;
    }

    return cflags;
}

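/*
 * Lookup key for the physical TB hash table; tb_lookup_cmp() matches
 * a candidate TB against every field below.
 */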
struct tb_desc {
    target_ulong pc;
    target_ulong cs_base;
    CPUArchState *env;
    tb_page_addr_t page_addr0;
    uint32_t flags;
    uint32_t cflags;
    uint32_t trace_vcpu_dstate;
};

static bool tb_lookup_cmp(const void *p, const void *d)
{
    const TranslationBlock *tb = p;
    const struct tb_desc *desc = d;

    if (tb->pc == desc->pc &&
        tb->page_addr[0] == desc->page_addr0 &&
        tb->cs_base == desc->cs_base &&
        tb->flags == desc->flags &&
        tb->trace_vcpu_dstate == desc->trace_vcpu_dstate &&
        tb_cflags(tb) == desc->cflags) {
        /* check next page if needed */
        if (tb->page_addr[1] == -1) {
            return true;
        } else {
            tb_page_addr_t phys_page1;
            target_ulong virt_page1;

            /*
             * We know that the first page matched, and an otherwise valid TB
             * encountered an incomplete instruction at the end of that page,
             * therefore we know that generating a new TB from the current PC
             * must also require reading from the next page -- even if the
             * second page does not match, and therefore the resulting insn
             * is different for the new TB. Therefore any exception raised
             * here by the faulting lookup is not premature.
             */
            virt_page1 = TARGET_PAGE_ALIGN(desc->pc);
            phys_page1 = get_page_addr_code(desc->env, virt_page1);
            if (tb->page_addr[1] == phys_page1) {
                return true;
            }
        }
    }
    return false;
}

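/*
 * Search the global hash table for a TB matching the given CPU state;
 * returns NULL if there is none, or if the current PC cannot be
 * resolved to a physical code address.
 */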
static TranslationBlock *tb_htable_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base, uint32_t flags,
                                          uint32_t cflags)
{
    tb_page_addr_t phys_pc;
    struct tb_desc desc;
    uint32_t h;

    desc.env = cpu->env_ptr;
    desc.cs_base = cs_base;
    desc.flags = flags;
    desc.cflags = cflags;
    desc.trace_vcpu_dstate = *cpu->trace_dstate;
    desc.pc = pc;
    phys_pc = get_page_addr_code(desc.env, pc);
    if (phys_pc == -1) {
        return NULL;
    }
    desc.page_addr0 = phys_pc;
    h = tb_hash_func(phys_pc, pc, flags, cflags, *cpu->trace_dstate);
    return qht_lookup_custom(&tb_ctx.htable, &desc, h, tb_lookup_cmp);
}

/* Might cause an exception, so have a longjmp destination ready */
static inline TranslationBlock *tb_lookup(CPUState *cpu, target_ulong pc,
                                          target_ulong cs_base,
                                          uint32_t flags, uint32_t cflags)
{
    TranslationBlock *tb;
    uint32_t hash;

    /* we should never be trying to look up an INVALID tb */
    tcg_debug_assert(!(cflags & CF_INVALID));

    hash = tb_jmp_cache_hash_func(pc);
    tb = qatomic_rcu_read(&cpu->tb_jmp_cache[hash]);

    if (likely(tb &&
               tb->pc == pc &&
               tb->cs_base == cs_base &&
               tb->flags == flags &&
               tb->trace_vcpu_dstate == *cpu->trace_dstate &&
               tb_cflags(tb) == cflags)) {
        return tb;
    }
    tb = tb_htable_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return NULL;
    }
    qatomic_set(&cpu->tb_jmp_cache[hash], tb);
    return tb;
}

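/* Emit -d exec / -d cpu logging for the TB that is about to run. */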
static inline void log_cpu_exec(target_ulong pc, CPUState *cpu,
                                const TranslationBlock *tb)
{
    if (unlikely(qemu_loglevel_mask(CPU_LOG_TB_CPU | CPU_LOG_EXEC))
        && qemu_log_in_addr_range(pc)) {

        qemu_log_mask(CPU_LOG_EXEC,
                      "Trace %d: %p [" TARGET_FMT_lx
                      "/" TARGET_FMT_lx "/%08x/%08x] %s\n",
                      cpu->cpu_index, tb->tc.ptr, tb->cs_base, pc,
                      tb->flags, tb->cflags, lookup_symbol(pc));

#if defined(DEBUG_DISAS)
        if (qemu_loglevel_mask(CPU_LOG_TB_CPU)) {
            FILE *logfile = qemu_log_trylock();
            if (logfile) {
                int flags = 0;

                if (qemu_loglevel_mask(CPU_LOG_TB_FPU)) {
                    flags |= CPU_DUMP_FPU;
                }
#if defined(TARGET_I386)
                flags |= CPU_DUMP_CCOP;
#endif
                cpu_dump_state(cpu, logfile, flags);
                qemu_log_unlock(logfile);
            }
        }
#endif /* DEBUG_DISAS */
    }
}

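/*
 * Returns true, with cpu->exception_index set to EXCP_DEBUG, if a
 * breakpoint fires at @pc. If another breakpoint merely shares the
 * page, *cflags is adjusted so the TB executes one insn at a time.
 */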
static bool check_for_breakpoints(CPUState *cpu, target_ulong pc,
                                  uint32_t *cflags)
{
    CPUBreakpoint *bp;
    bool match_page = false;

    if (likely(QTAILQ_EMPTY(&cpu->breakpoints))) {
        return false;
    }

    /*
     * Singlestep overrides breakpoints.
     * This requirement is visible in the record-replay tests, where
     * we would fail to make forward progress in reverse-continue.
     *
     * TODO: gdb singlestep should only override gdb breakpoints,
     * so that one could (gdb) singlestep into the guest kernel's
     * architectural breakpoint handler.
     */
    if (cpu->singlestep_enabled) {
        return false;
    }

    QTAILQ_FOREACH(bp, &cpu->breakpoints, entry) {
        /*
         * If we have an exact pc match, trigger the breakpoint.
         * Otherwise, note matches within the page.
         */
        if (pc == bp->pc) {
            bool match_bp = false;

            if (bp->flags & BP_GDB) {
                match_bp = true;
            } else if (bp->flags & BP_CPU) {
#ifdef CONFIG_USER_ONLY
                g_assert_not_reached();
#else
                CPUClass *cc = CPU_GET_CLASS(cpu);
                assert(cc->tcg_ops->debug_check_breakpoint);
                match_bp = cc->tcg_ops->debug_check_breakpoint(cpu);
#endif
            }

            if (match_bp) {
                cpu->exception_index = EXCP_DEBUG;
                return true;
            }
        } else if (((pc ^ bp->pc) & TARGET_PAGE_MASK) == 0) {
            match_page = true;
        }
    }

    /*
     * Within the same page as a breakpoint, single-step,
     * returning to helper_lookup_tb_ptr after each insn looking
     * for the actual breakpoint.
     *
     * TODO: Perhaps better to record all of the TBs associated
     * with a given virtual page that contains a breakpoint, and
     * then invalidate them when a new overlapping breakpoint is
     * set on the page. Non-overlapping TBs would not be
     * invalidated, nor would any TB need to be invalidated as
     * breakpoints are removed.
     */
    if (match_page) {
        *cflags = (*cflags & ~CF_COUNT_MASK) | CF_NO_GOTO_TB | 1;
    }
    return false;
}

/**
 * helper_lookup_tb_ptr: quick check for next tb
 * @env: current cpu state
 *
 * Look for an existing TB matching the current cpu state.
 * If found, return the code pointer. If not found, return
 * the tcg epilogue so that we return into cpu_tb_exec.
 */
const void *HELPER(lookup_tb_ptr)(CPUArchState *env)
{
    CPUState *cpu = env_cpu(env);
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;

    cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

    cflags = curr_cflags(cpu);
    if (check_for_breakpoints(cpu, pc, &cflags)) {
        cpu_loop_exit(cpu);
    }

    tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
    if (tb == NULL) {
        return tcg_code_gen_epilogue;
    }

    log_cpu_exec(pc, cpu, tb);

    return tb->tc.ptr;
}

/* Execute a TB, and fix up the CPU state afterwards if necessary */
/*
 * Disable CFI checks.
 * TCG creates binary blobs at runtime, with the transformed code.
 * A TB is a blob of binary code, created at runtime and called with an
 * indirect function call. Since such a function did not exist at compile
 * time, the CFI runtime has no way to verify its signature and would fail.
 * TCG is not considered a security-sensitive part of QEMU, so this does
 * not affect the impact of CFI in environments with high security
 * requirements.
 */
static inline TranslationBlock * QEMU_DISABLE_CFI
cpu_tb_exec(CPUState *cpu, TranslationBlock *itb, int *tb_exit)
{
    CPUArchState *env = cpu->env_ptr;
    uintptr_t ret;
    TranslationBlock *last_tb;
    const void *tb_ptr = itb->tc.ptr;

    log_cpu_exec(itb->pc, cpu, itb);

    qemu_thread_jit_execute();
    ret = tcg_qemu_tb_exec(env, tb_ptr);
    cpu->can_do_io = 1;
    /*
     * TODO: Delay swapping back to the read-write region of the TB
     * until we actually need to modify the TB. The read-only copy,
     * coming from the rx region, shares the same host TLB entry as
     * the code that executed the exit_tb opcode that arrived here.
     * If we insist on touching both the RX and the RW pages, we
     * double the host TLB pressure.
     */
    last_tb = tcg_splitwx_to_rw((void *)(ret & ~TB_EXIT_MASK));
    *tb_exit = ret & TB_EXIT_MASK;

    trace_exec_tb_exit(last_tb, *tb_exit);

    if (*tb_exit > TB_EXIT_IDX1) {
        /* We didn't start executing this TB (eg because the instruction
         * counter hit zero); we must restore the guest PC to the address
         * of the start of the TB.
         */
        CPUClass *cc = CPU_GET_CLASS(cpu);
        qemu_log_mask_and_addr(CPU_LOG_EXEC, last_tb->pc,
                               "Stopped execution of TB chain before %p ["
                               TARGET_FMT_lx "] %s\n",
                               last_tb->tc.ptr, last_tb->pc,
                               lookup_symbol(last_tb->pc));
        if (cc->tcg_ops->synchronize_from_tb) {
            cc->tcg_ops->synchronize_from_tb(cpu, last_tb);
        } else {
            assert(cc->set_pc);
            cc->set_pc(cpu, last_tb->pc);
        }
    }

    /*
     * If gdb single-step, and we haven't raised another exception,
     * raise a debug exception. Single-step with another exception
     * is handled in cpu_handle_exception.
     */
    if (unlikely(cpu->singlestep_enabled) && cpu->exception_index == -1) {
        cpu->exception_index = EXCP_DEBUG;
        cpu_loop_exit(cpu);
    }

    return last_tb;
}

static void cpu_exec_enter(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_enter) {
        cc->tcg_ops->cpu_exec_enter(cpu);
    }
}

static void cpu_exec_exit(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);

    if (cc->tcg_ops->cpu_exec_exit) {
        cc->tcg_ops->cpu_exec_exit(cpu);
    }
}

void cpu_exec_step_atomic(CPUState *cpu)
{
    CPUArchState *env = cpu->env_ptr;
    TranslationBlock *tb;
    target_ulong cs_base, pc;
    uint32_t flags, cflags;
    int tb_exit;

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();
        g_assert(cpu == current_cpu);
        g_assert(!cpu->running);
        cpu->running = true;

        cpu_get_tb_cpu_state(env, &pc, &cs_base, &flags);

        cflags = curr_cflags(cpu);
        /* Execute in a serial context. */
        cflags &= ~CF_PARALLEL;
        /* After 1 insn, return and release the exclusive lock. */
        cflags |= CF_NO_GOTO_TB | CF_NO_GOTO_PTR | 1;
        /*
         * No need to check_for_breakpoints here.
         * We only arrive in cpu_exec_step_atomic after beginning execution
         * of an insn that includes an atomic operation we can't handle.
         * Any breakpoint for this insn will have been recognized earlier.
         */

        tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
        if (tb == NULL) {
            mmap_lock();
            tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
            mmap_unlock();
        }

        cpu_exec_enter(cpu);
        /* execute the generated code */
        trace_exec_tb(tb, pc);
        cpu_tb_exec(cpu, tb, &tb_exit);
        cpu_exec_exit(cpu);
    } else {
#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        assert_no_pages_locked();
        qemu_plugin_disable_mem_helpers(cpu);
    }

    /*
     * As we start the exclusive region before codegen we must still
     * be in the region if we longjump out of either the codegen or
     * the execution.
     */
    g_assert(cpu_in_exclusive_context(cpu));
    cpu->running = false;
    end_exclusive();
}

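/*
 * Redirect jump slot @n of @tb to @addr: patch the generated code in
 * place for direct jumps, or store the new indirect jump target.
 */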
void tb_set_jmp_target(TranslationBlock *tb, int n, uintptr_t addr)
{
    if (TCG_TARGET_HAS_direct_jump) {
        uintptr_t offset = tb->jmp_target_arg[n];
        uintptr_t tc_ptr = (uintptr_t)tb->tc.ptr;
        uintptr_t jmp_rx = tc_ptr + offset;
        uintptr_t jmp_rw = jmp_rx - tcg_splitwx_diff;
        tb_target_set_jmp_target(tc_ptr, jmp_rx, jmp_rw, addr);
    } else {
        tb->jmp_target_arg[n] = addr;
    }
}

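/*
 * Chain exit @n of @tb directly to @tb_next, provided the destination
 * is still valid and the jump slot has not already been claimed.
 */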
static inline void tb_add_jump(TranslationBlock *tb, int n,
                               TranslationBlock *tb_next)
{
    uintptr_t old;

    qemu_thread_jit_write();
    assert(n < ARRAY_SIZE(tb->jmp_list_next));
    qemu_spin_lock(&tb_next->jmp_lock);

    /* make sure the destination TB is valid */
    if (tb_next->cflags & CF_INVALID) {
        goto out_unlock_next;
    }
    /* Atomically claim the jump destination slot only if it was NULL */
    old = qatomic_cmpxchg(&tb->jmp_dest[n], (uintptr_t)NULL,
                          (uintptr_t)tb_next);
    if (old) {
        goto out_unlock_next;
    }

    /* patch the native jump address */
    tb_set_jmp_target(tb, n, (uintptr_t)tb_next->tc.ptr);

    /* add in TB jmp list */
    tb->jmp_list_next[n] = tb_next->jmp_list_head;
    tb_next->jmp_list_head = (uintptr_t)tb | n;

    qemu_spin_unlock(&tb_next->jmp_lock);

    qemu_log_mask_and_addr(CPU_LOG_EXEC, tb->pc,
                           "Linking TBs %p [" TARGET_FMT_lx
                           "] index %d -> %p [" TARGET_FMT_lx "]\n",
                           tb->tc.ptr, tb->pc, n,
                           tb_next->tc.ptr, tb_next->pc);
    return;

 out_unlock_next:
    qemu_spin_unlock(&tb_next->jmp_lock);
    return;
}

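/* Returns true if the CPU is halted and should stay that way. */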
static inline bool cpu_handle_halt(CPUState *cpu)
{
#ifndef CONFIG_USER_ONLY
    if (cpu->halted) {
#if defined(TARGET_I386)
        if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            qemu_mutex_lock_iothread();
            apic_poll_irq(x86_cpu->apic_state);
            cpu_reset_interrupt(cpu, CPU_INTERRUPT_POLL);
            qemu_mutex_unlock_iothread();
        }
#endif /* TARGET_I386 */
        if (!cpu_has_work(cpu)) {
            return true;
        }

        cpu->halted = 0;
    }
#endif /* !CONFIG_USER_ONLY */

    return false;
}

static inline void cpu_handle_debug_exception(CPUState *cpu)
{
    CPUClass *cc = CPU_GET_CLASS(cpu);
    CPUWatchpoint *wp;

    if (!cpu->watchpoint_hit) {
        QTAILQ_FOREACH(wp, &cpu->watchpoints, entry) {
            wp->flags &= ~BP_WATCHPOINT_HIT;
        }
    }

    if (cc->tcg_ops->debug_excp_handler) {
        cc->tcg_ops->debug_excp_handler(cpu);
    }
}

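/*
 * Process a pending exception, if any. Returns true, with *ret set,
 * when cpu_exec() should return to its caller.
 */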
static inline bool cpu_handle_exception(CPUState *cpu, int *ret)
{
    if (cpu->exception_index < 0) {
#ifndef CONFIG_USER_ONLY
        if (replay_has_exception()
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0) {
            /* Execute just one insn to trigger exception pending in the log */
            cpu->cflags_next_tb = (curr_cflags(cpu) & ~CF_USE_ICOUNT)
                                  | CF_NOIRQ | 1;
        }
#endif
        return false;
    }
    if (cpu->exception_index >= EXCP_INTERRUPT) {
        /* exit request from the cpu execution loop */
        *ret = cpu->exception_index;
        if (*ret == EXCP_DEBUG) {
            cpu_handle_debug_exception(cpu);
        }
        cpu->exception_index = -1;
        return true;
    } else {
#if defined(CONFIG_USER_ONLY)
        /* In user mode only, we simulate a fake exception which will
           be handled outside the cpu execution loop. */
#if defined(TARGET_I386)
        CPUClass *cc = CPU_GET_CLASS(cpu);
        cc->tcg_ops->fake_user_interrupt(cpu);
#endif /* TARGET_I386 */
        *ret = cpu->exception_index;
        cpu->exception_index = -1;
        return true;
#else
        if (replay_exception()) {
            CPUClass *cc = CPU_GET_CLASS(cpu);
            qemu_mutex_lock_iothread();
            cc->tcg_ops->do_interrupt(cpu);
            qemu_mutex_unlock_iothread();
            cpu->exception_index = -1;

            if (unlikely(cpu->singlestep_enabled)) {
                /*
                 * After processing the exception, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                *ret = EXCP_DEBUG;
                cpu_handle_debug_exception(cpu);
                return true;
            }
        } else if (!replay_has_interrupt()) {
            /* give a chance to iothread in replay mode */
            *ret = EXCP_INTERRUPT;
            return true;
        }
#endif
    }

    return false;
}

#ifndef CONFIG_USER_ONLY
/*
 * CPU_INTERRUPT_POLL is a virtual event which gets converted into a
 * "real" interrupt event later. It does not need to be recorded for
 * replay purposes.
 */
static inline bool need_replay_interrupt(int interrupt_request)
{
#if defined(TARGET_I386)
    return !(interrupt_request & CPU_INTERRUPT_POLL);
#else
    return true;
#endif
}
#endif /* !CONFIG_USER_ONLY */

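/*
 * Service pending interrupt and exit requests. Returns true when the
 * inner execution loop should bail out; may clear *last_tb so that the
 * previous TB is not chained to the next one.
 */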
static inline bool cpu_handle_interrupt(CPUState *cpu,
                                        TranslationBlock **last_tb)
{
    /*
     * If we have requested custom cflags with CF_NOIRQ we should
     * skip checking here. Any pending interrupts will get picked up
     * by the next TB we execute under normal cflags.
     */
    if (cpu->cflags_next_tb != -1 && cpu->cflags_next_tb & CF_NOIRQ) {
        return false;
    }

    /* Clear the interrupt flag now since we're processing
     * cpu->interrupt_request and cpu->exit_request.
     * Ensure zeroing happens before reading cpu->exit_request or
     * cpu->interrupt_request (see also smp_wmb in cpu_exit())
     */
    qatomic_mb_set(&cpu_neg(cpu)->icount_decr.u16.high, 0);

    if (unlikely(qatomic_read(&cpu->interrupt_request))) {
        int interrupt_request;
        qemu_mutex_lock_iothread();
        interrupt_request = cpu->interrupt_request;
        if (unlikely(cpu->singlestep_enabled & SSTEP_NOIRQ)) {
            /* Mask out external interrupts for this step. */
            interrupt_request &= ~CPU_INTERRUPT_SSTEP_MASK;
        }
        if (interrupt_request & CPU_INTERRUPT_DEBUG) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_DEBUG;
            cpu->exception_index = EXCP_DEBUG;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if !defined(CONFIG_USER_ONLY)
        if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
            /* Do nothing */
        } else if (interrupt_request & CPU_INTERRUPT_HALT) {
            replay_interrupt();
            cpu->interrupt_request &= ~CPU_INTERRUPT_HALT;
            cpu->halted = 1;
            cpu->exception_index = EXCP_HLT;
            qemu_mutex_unlock_iothread();
            return true;
        }
#if defined(TARGET_I386)
        else if (interrupt_request & CPU_INTERRUPT_INIT) {
            X86CPU *x86_cpu = X86_CPU(cpu);
            CPUArchState *env = &x86_cpu->env;
            replay_interrupt();
            cpu_svm_check_intercept_param(env, SVM_EXIT_INIT, 0, 0);
            do_cpu_init(x86_cpu);
            cpu->exception_index = EXCP_HALTED;
            qemu_mutex_unlock_iothread();
            return true;
        }
#else
        else if (interrupt_request & CPU_INTERRUPT_RESET) {
            replay_interrupt();
            cpu_reset(cpu);
            qemu_mutex_unlock_iothread();
            return true;
        }
#endif /* !TARGET_I386 */
        /* The target hook has 3 exit conditions:
           false when the interrupt isn't processed,
           true when it is, and we should restart on a new TB,
           or it longjmps out via cpu_loop_exit(). */
        else {
            CPUClass *cc = CPU_GET_CLASS(cpu);

            if (cc->tcg_ops->cpu_exec_interrupt &&
                cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
                if (need_replay_interrupt(interrupt_request)) {
                    replay_interrupt();
                }
                /*
                 * After processing the interrupt, ensure an EXCP_DEBUG is
                 * raised when single-stepping so that GDB doesn't miss the
                 * next instruction.
                 */
                if (unlikely(cpu->singlestep_enabled)) {
                    cpu->exception_index = EXCP_DEBUG;
                    qemu_mutex_unlock_iothread();
                    return true;
                }
                cpu->exception_index = -1;
                *last_tb = NULL;
            }
            /* The target hook may have updated the 'cpu->interrupt_request';
             * reload the 'interrupt_request' value */
            interrupt_request = cpu->interrupt_request;
        }
#endif /* !CONFIG_USER_ONLY */
        if (interrupt_request & CPU_INTERRUPT_EXITTB) {
            cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
            /* ensure that no TB jump will be modified as
               the program flow was changed */
            *last_tb = NULL;
        }

        /* If we exit via cpu_loop_exit/longjmp it is reset in cpu_exec */
        qemu_mutex_unlock_iothread();
    }

    /* Finally, check if we need to exit to the main loop. */
    if (unlikely(qatomic_read(&cpu->exit_request))
        || (icount_enabled()
            && (cpu->cflags_next_tb == -1 || cpu->cflags_next_tb & CF_USE_ICOUNT)
            && cpu_neg(cpu)->icount_decr.u16.low + cpu->icount_extra == 0)) {
        qatomic_set(&cpu->exit_request, 0);
        if (cpu->exception_index == -1) {
            cpu->exception_index = EXCP_INTERRUPT;
        }
        return true;
    }

    return false;
}

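/*
 * Execute @tb, recording it in *last_tb as a candidate for chaining.
 * Under icount, refill the instruction budget when the decrementer
 * expires, constraining the next TB to the remaining insn count.
 */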
static inline void cpu_loop_exec_tb(CPUState *cpu, TranslationBlock *tb,
                                    TranslationBlock **last_tb, int *tb_exit)
{
    int32_t insns_left;

    trace_exec_tb(tb, tb->pc);
    tb = cpu_tb_exec(cpu, tb, tb_exit);
    if (*tb_exit != TB_EXIT_REQUESTED) {
        *last_tb = tb;
        return;
    }

    *last_tb = NULL;
    insns_left = qatomic_read(&cpu_neg(cpu)->icount_decr.u32);
    if (insns_left < 0) {
        /* Something asked us to stop executing chained TBs; just
         * continue round the main loop. Whatever requested the exit
         * will also have set something else (eg exit_request or
         * interrupt_request) which will be handled by
         * cpu_handle_interrupt. cpu_handle_interrupt will also
         * clear cpu->icount_decr.u16.high.
         */
        return;
    }

    /* Instruction counter expired. */
    assert(icount_enabled());
#ifndef CONFIG_USER_ONLY
    /* Ensure global icount has gone forward */
    icount_update(cpu);
    /* Refill decrementer and continue execution. */
    insns_left = MIN(0xffff, cpu->icount_budget);
    cpu_neg(cpu)->icount_decr.u16.low = insns_left;
    cpu->icount_extra = cpu->icount_budget - insns_left;

    /*
     * If the next tb has more instructions than we have left to
     * execute we need to ensure we find/generate a TB with exactly
     * insns_left instructions in it.
     */
    if (insns_left > 0 && insns_left < tb->icount) {
        assert(insns_left <= CF_COUNT_MASK);
        assert(cpu->icount_extra == 0);
        cpu->cflags_next_tb = (tb->cflags & ~CF_COUNT_MASK) | insns_left;
    }
#endif
}

/* main execution loop */

int cpu_exec(CPUState *cpu)
{
    int ret;
    SyncClocks sc = { 0 };

    /* replay_interrupt may need current_cpu */
    current_cpu = cpu;

    if (cpu_handle_halt(cpu)) {
        return EXCP_HALTED;
    }

    rcu_read_lock();

    cpu_exec_enter(cpu);

    /* Calculate difference between guest clock and host clock.
     * This delay includes the delay of the last cycle, so
     * what we have to do is sleep until it is 0. As for the
     * advance/delay we gain here, we try to fix it next time.
     */
    init_delay_params(&sc, cpu);

    /* prepare setjmp context for exception handling */
    if (sigsetjmp(cpu->jmp_env, 0) != 0) {
#if defined(__clang__)
        /*
         * Some compilers wrongly smash all local variables after
         * siglongjmp (the spec requires that only non-volatile locals
         * which are changed between the sigsetjmp and siglongjmp are
         * permitted to be trashed). There were bug reports for gcc
         * 4.5.0 and clang. The bug is fixed in all versions of gcc
         * that we support, but is still unfixed in clang:
         *   https://bugs.llvm.org/show_bug.cgi?id=21183
         *
         * Reload an essential local variable here for those compilers.
         * Newer versions of gcc would complain about this code (-Wclobbered),
         * so we only perform the workaround for clang.
         */
        cpu = current_cpu;
#else
        /* Non-buggy compilers preserve this; assert the correct value. */
        g_assert(cpu == current_cpu);
#endif

#ifndef CONFIG_SOFTMMU
        clear_helper_retaddr();
        if (have_mmap_lock()) {
            mmap_unlock();
        }
#endif
        if (qemu_mutex_iothread_locked()) {
            qemu_mutex_unlock_iothread();
        }
        qemu_plugin_disable_mem_helpers(cpu);

        assert_no_pages_locked();
    }

    /* if an exception is pending, we execute it here */
    while (!cpu_handle_exception(cpu, &ret)) {
        TranslationBlock *last_tb = NULL;
        int tb_exit = 0;

        while (!cpu_handle_interrupt(cpu, &last_tb)) {
            TranslationBlock *tb;
            target_ulong cs_base, pc;
            uint32_t flags, cflags;

            cpu_get_tb_cpu_state(cpu->env_ptr, &pc, &cs_base, &flags);

            /*
             * When requested, use an exact setting for cflags for the next
             * execution. This is used for icount, precise smc, and stop-
             * after-access watchpoints. Since this request should never
             * have CF_INVALID set, -1 is a convenient invalid value that
             * does not require tcg headers for cpu_common_reset.
             */
            cflags = cpu->cflags_next_tb;
            if (cflags == -1) {
                cflags = curr_cflags(cpu);
            } else {
                cpu->cflags_next_tb = -1;
            }

            if (check_for_breakpoints(cpu, pc, &cflags)) {
                break;
            }

            tb = tb_lookup(cpu, pc, cs_base, flags, cflags);
            if (tb == NULL) {
                mmap_lock();
                tb = tb_gen_code(cpu, pc, cs_base, flags, cflags);
                mmap_unlock();
                /*
                 * Add the TB to the virtual-pc hash table for
                 * fast lookup next time.
                 */
                qatomic_set(&cpu->tb_jmp_cache[tb_jmp_cache_hash_func(pc)], tb);
            }

#ifndef CONFIG_USER_ONLY
            /*
             * We don't take care of direct jumps when address mapping
             * changes in system emulation. So it's not safe to make a
             * direct jump to a TB spanning two pages because the mapping
             * for the second page can change.
             */
            if (tb->page_addr[1] != -1) {
                last_tb = NULL;
            }
#endif
            /* See if we can patch the calling TB. */
            if (last_tb) {
                tb_add_jump(last_tb, tb_exit, tb);
            }

            cpu_loop_exec_tb(cpu, tb, &last_tb, &tb_exit);

            /* Try to align the host and virtual clocks
               if the guest is in advance */
            align_clocks(&sc, cpu);
        }
    }

    cpu_exec_exit(cpu);
    rcu_read_unlock();

    return ret;
}

Claudio Fontana740b1752020-08-19 13:17:19 +02001029
Claudio Fontana7df5e3d2021-02-04 17:39:11 +01001030void tcg_exec_realizefn(CPUState *cpu, Error **errp)
1031{
1032 static bool tcg_target_initialized;
1033 CPUClass *cc = CPU_GET_CLASS(cpu);
1034
1035 if (!tcg_target_initialized) {
Claudio Fontana78271682021-02-04 17:39:23 +01001036 cc->tcg_ops->initialize();
Claudio Fontana7df5e3d2021-02-04 17:39:11 +01001037 tcg_target_initialized = true;
1038 }
1039 tlb_init(cpu);
1040 qemu_plugin_vcpu_init_hook(cpu);
1041
1042#ifndef CONFIG_USER_ONLY
1043 tcg_iommu_init_notifier_list(cpu);
1044#endif /* !CONFIG_USER_ONLY */
1045}
1046
1047/* undo the initializations in reverse order */
1048void tcg_exec_unrealizefn(CPUState *cpu)
1049{
1050#ifndef CONFIG_USER_ONLY
1051 tcg_iommu_free_notifier_list(cpu);
1052#endif /* !CONFIG_USER_ONLY */
1053
1054 qemu_plugin_vcpu_exit_hook(cpu);
1055 tlb_destroy(cpu);
1056}
1057
#ifndef CONFIG_USER_ONLY

static void dump_drift_info(GString *buf)
{
    if (!icount_enabled()) {
        return;
    }

    g_string_append_printf(buf, "Host - Guest clock %"PRIi64" ms\n",
                           (cpu_get_clock() - icount_get()) / SCALE_MS);
    if (icount_align_option) {
        g_string_append_printf(buf, "Max guest delay %"PRIi64" ms\n",
                               -max_delay / SCALE_MS);
        g_string_append_printf(buf, "Max guest advance %"PRIi64" ms\n",
                               max_advance / SCALE_MS);
    } else {
        g_string_append_printf(buf, "Max guest delay NA\n");
        g_string_append_printf(buf, "Max guest advance NA\n");
    }
}

HumanReadableText *qmp_x_query_jit(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp, "JIT information is only available with accel=tcg");
        return NULL;
    }

    dump_exec_info(buf);
    dump_drift_info(buf);

    return human_readable_text_from_str(buf);
}

HumanReadableText *qmp_x_query_opcount(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");

    if (!tcg_enabled()) {
        error_setg(errp, "Opcode count information is only available with accel=tcg");
        return NULL;
    }

    tcg_dump_op_count(buf);

    return human_readable_text_from_str(buf);
}

#ifdef CONFIG_PROFILER

int64_t dev_time;

HumanReadableText *qmp_x_query_profile(Error **errp)
{
    g_autoptr(GString) buf = g_string_new("");
    static int64_t last_cpu_exec_time;
    int64_t cpu_exec_time;
    int64_t delta;

    cpu_exec_time = tcg_cpu_exec_time();
    delta = cpu_exec_time - last_cpu_exec_time;

    g_string_append_printf(buf, "async time %" PRId64 " (%0.3f)\n",
                           dev_time, dev_time / (double)NANOSECONDS_PER_SECOND);
    g_string_append_printf(buf, "qemu time %" PRId64 " (%0.3f)\n",
                           delta, delta / (double)NANOSECONDS_PER_SECOND);
    last_cpu_exec_time = cpu_exec_time;
    dev_time = 0;

    return human_readable_text_from_str(buf);
}
#else
HumanReadableText *qmp_x_query_profile(Error **errp)
{
    error_setg(errp, "Internal profiler not compiled");
    return NULL;
}
#endif

#endif /* !CONFIG_USER_ONLY */