/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "monitor/monitor.h"
#include "qemu/coroutine-tls.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-machine.h"
#include "qapi/qapi-commands-misc.h"
#include "qapi/qapi-events-run-state.h"
#include "qapi/qmp/qerror.h"
#include "exec/gdbstub.h"
#include "sysemu/hw_accel.h"
#include "exec/cpu-common.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "qemu/plugin.h"
#include "sysemu/cpus.h"
#include "qemu/guest-random.h"
#include "hw/nmi.h"
#include "sysemu/replay.h"
#include "sysemu/runstate.h"
#include "sysemu/cpu-timers.h"
#include "sysemu/whpx.h"
#include "hw/boards.h"
#include "hw/hw.h"
#include "trace.h"

#ifdef CONFIG_LINUX

#include <sys/prctl.h>

#ifndef PR_MCE_KILL
#define PR_MCE_KILL 33
#endif

#ifndef PR_MCE_KILL_SET
#define PR_MCE_KILL_SET 1
#endif

#ifndef PR_MCE_KILL_EARLY
#define PR_MCE_KILL_EARLY 1
#endif

#endif /* CONFIG_LINUX */

static QemuMutex qemu_global_mutex;

/*
 * The chosen accelerator is supposed to register this.
 */
static const AccelOpsClass *cpus_accel;

bool cpu_is_stopped(CPUState *cpu)
{
    return cpu->stopped || !runstate_is_running();
}

bool cpu_work_list_empty(CPUState *cpu)
{
    return QSIMPLEQ_EMPTY_ATOMIC(&cpu->work_list);
}

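/*
 * Return true if the vCPU thread has nothing to do and may sleep: no stop
 * request or queued work is pending, and the CPU is either stopped or
 * halted with no work.  The accelerator can refine the answer through its
 * cpu_thread_is_idle hook.
 */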
bool cpu_thread_is_idle(CPUState *cpu)
{
    if (cpu->stop || !cpu_work_list_empty(cpu)) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return true;
    }
    if (!cpu->halted || cpu_has_work(cpu)) {
        return false;
    }
    if (cpus_accel->cpu_thread_is_idle) {
        return cpus_accel->cpu_thread_is_idle(cpu);
    }
    return true;
}

bool all_cpu_threads_idle(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu_thread_is_idle(cpu)) {
            return false;
        }
    }
    return true;
}

/***********************************************************/
void hw_error(const char *fmt, ...)
{
    va_list ap;
    CPUState *cpu;

    va_start(ap, fmt);
    fprintf(stderr, "qemu: hardware error: ");
    vfprintf(stderr, fmt, ap);
    fprintf(stderr, "\n");
    CPU_FOREACH(cpu) {
        fprintf(stderr, "CPU #%d:\n", cpu->cpu_index);
        cpu_dump_state(cpu, stderr, CPU_DUMP_FPU);
    }
    va_end(ap);
    abort();
}

void cpu_synchronize_all_states(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_state(cpu);
    }
}

void cpu_synchronize_all_post_reset(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_all_post_init(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_post_init(cpu);
    }
}

void cpu_synchronize_all_pre_loadvm(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        cpu_synchronize_pre_loadvm(cpu);
    }
}

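/*
 * The cpu_synchronize_*() helpers below dispatch to the hooks of the
 * registered accelerator; accelerators that do not provide a hook fall
 * through as no-ops.
 */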
void cpu_synchronize_state(CPUState *cpu)
{
    if (cpus_accel->synchronize_state) {
        cpus_accel->synchronize_state(cpu);
    }
}

void cpu_synchronize_post_reset(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_reset) {
        cpus_accel->synchronize_post_reset(cpu);
    }
}

void cpu_synchronize_post_init(CPUState *cpu)
{
    if (cpus_accel->synchronize_post_init) {
        cpus_accel->synchronize_post_init(cpu);
    }
}

void cpu_synchronize_pre_loadvm(CPUState *cpu)
{
    if (cpus_accel->synchronize_pre_loadvm) {
        cpus_accel->synchronize_pre_loadvm(cpu);
    }
}

bool cpus_are_resettable(void)
{
    if (cpus_accel->cpus_are_resettable) {
        return cpus_accel->cpus_are_resettable();
    }
    return true;
}

int64_t cpus_get_virtual_clock(void)
{
    /*
     * XXX
     *
     * need to check that cpus_accel is not NULL, because qcow2 calls
     * qemu_get_clock_ns(CLOCK_VIRTUAL) without any accel initialized and
     * with ticks disabled in some io-tests:
     * 030 040 041 060 099 120 127 140 156 161 172 181 191 192 195 203 229 249 256 267
     *
     * is this expected?
     *
     * XXX
     */
    if (cpus_accel && cpus_accel->get_virtual_clock) {
        return cpus_accel->get_virtual_clock();
    }
    return cpu_get_clock();
}

/*
 * Return the time elapsed in the VM between vm_start and vm_stop.  Unless
 * icount is active, cpus_get_elapsed_ticks() uses units of the host CPU
 * cycle counter.
 */
int64_t cpus_get_elapsed_ticks(void)
{
    if (cpus_accel->get_elapsed_ticks) {
        return cpus_accel->get_elapsed_ticks();
    }
    return cpu_get_ticks();
}

static void generic_handle_interrupt(CPUState *cpu, int mask)
{
    cpu->interrupt_request |= mask;

    if (!qemu_cpu_is_self(cpu)) {
        qemu_cpu_kick(cpu);
    }
}

void cpu_interrupt(CPUState *cpu, int mask)
{
    if (cpus_accel->handle_interrupt) {
        cpus_accel->handle_interrupt(cpu, mask);
    } else {
        generic_handle_interrupt(cpu, mask);
    }
}

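/*
 * If the VM is running, pause all vCPUs, disable the ticks, notify the VM
 * state change handlers and optionally emit the QMP STOP event.  Block
 * devices are always drained and flushed; the result of bdrv_flush_all()
 * is returned.
 */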
static int do_vm_stop(RunState state, bool send_stop)
{
    int ret = 0;

    if (runstate_is_running()) {
        runstate_set(state);
        cpu_disable_ticks();
        pause_all_vcpus();
        vm_state_notify(0, state);
        if (send_stop) {
            qapi_event_send_stop();
        }
    }

    bdrv_drain_all();
    ret = bdrv_flush_all();
    trace_vm_stop_flush_all(ret);

    return ret;
}

/* Special vm_stop() variant for terminating the process. Historically clients
 * did not expect a QMP STOP event and so we need to retain compatibility.
 */
int vm_shutdown(void)
{
    return do_vm_stop(RUN_STATE_SHUTDOWN, false);
}

bool cpu_can_run(CPUState *cpu)
{
    if (cpu->stop) {
        return false;
    }
    if (cpu_is_stopped(cpu)) {
        return false;
    }
    return true;
}

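/*
 * Called when a vCPU hits a debug event (breakpoint or single-step).
 * During record/replay debugging the breakpoint is reported and stepped
 * over; otherwise the stopping CPU is recorded for the gdbstub, a debug
 * stop is requested from the main loop, and the vCPU is marked stopped.
 */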
void cpu_handle_guest_debug(CPUState *cpu)
{
    if (replay_running_debug()) {
        if (!cpu->singlestep_enabled) {
            /*
             * Report the breakpoint and single-step to skip it.
             */
            replay_breakpoint();
            cpu_single_step(cpu, SSTEP_ENABLE);
        } else {
            cpu_single_step(cpu, 0);
        }
    } else {
        gdb_set_stop_cpu(cpu);
        qemu_system_debug_request();
        cpu->stopped = true;
    }
}

#ifdef CONFIG_LINUX
static void sigbus_reraise(void)
{
    sigset_t set;
    struct sigaction action;

    memset(&action, 0, sizeof(action));
    action.sa_handler = SIG_DFL;
    if (!sigaction(SIGBUS, &action, NULL)) {
        raise(SIGBUS);
        sigemptyset(&set);
        sigaddset(&set, SIGBUS);
        pthread_sigmask(SIG_UNBLOCK, &set, NULL);
    }
    perror("Failed to re-raise SIGBUS!");
    abort();
}

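/*
 * SIGBUS handler: hardware memory errors (BUS_MCEERR_AO/AR) are forwarded
 * to KVM, either for the current vCPU or, when raised in the main thread,
 * for the whole VM.  Any other SIGBUS, or a failure to handle the error,
 * restores the default action, re-raises the signal and aborts.
 */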
static void sigbus_handler(int n, siginfo_t *siginfo, void *ctx)
{
    if (siginfo->si_code != BUS_MCEERR_AO && siginfo->si_code != BUS_MCEERR_AR) {
        sigbus_reraise();
    }

    if (current_cpu) {
        /* Called asynchronously in VCPU thread. */
        if (kvm_on_sigbus_vcpu(current_cpu, siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    } else {
        /* Called synchronously (via signalfd) in main thread. */
        if (kvm_on_sigbus(siginfo->si_code, siginfo->si_addr)) {
            sigbus_reraise();
        }
    }
}

static void qemu_init_sigbus(void)
{
    struct sigaction action;

    /*
     * ALERT: when modifying this, take care that SIGBUS forwarding in
     * qemu_prealloc_mem() will continue working as expected.
     */
    memset(&action, 0, sizeof(action));
    action.sa_flags = SA_SIGINFO;
    action.sa_sigaction = sigbus_handler;
    sigaction(SIGBUS, &action, NULL);

    prctl(PR_MCE_KILL, PR_MCE_KILL_SET, PR_MCE_KILL_EARLY, 0, 0);
}
#else /* !CONFIG_LINUX */
static void qemu_init_sigbus(void)
{
}
#endif /* !CONFIG_LINUX */

static QemuThread io_thread;

/* cpu creation */
static QemuCond qemu_cpu_cond;
/* system init */
static QemuCond qemu_pause_cond;

void qemu_init_cpu_loop(void)
{
    qemu_init_sigbus();
    qemu_cond_init(&qemu_cpu_cond);
    qemu_cond_init(&qemu_pause_cond);
    qemu_mutex_init(&qemu_global_mutex);

    qemu_thread_get_self(&io_thread);
}

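/*
 * Run func(cpu, data) on the given vCPU and wait for it to complete; the
 * BQL (qemu_global_mutex) is dropped while waiting.
 */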
void run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data)
{
    do_run_on_cpu(cpu, func, data, &qemu_global_mutex);
}

static void qemu_cpu_stop(CPUState *cpu, bool exit)
{
    g_assert(qemu_cpu_is_self(cpu));
    cpu->stop = false;
    cpu->stopped = true;
    if (exit) {
        cpu_exit(cpu);
    }
    qemu_cond_broadcast(&qemu_pause_cond);
}

void qemu_wait_io_event_common(CPUState *cpu)
{
    qatomic_set_mb(&cpu->thread_kicked, false);
    if (cpu->stop) {
        qemu_cpu_stop(cpu, false);
    }
    process_queued_cpu_work(cpu);
}

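/*
 * Sleep on the vCPU's halt condition variable (under the BQL) while the
 * thread is idle, notifying plugins when the vCPU goes idle and when it
 * resumes, then handle any pending stop request and queued work.
 */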
void qemu_wait_io_event(CPUState *cpu)
{
    bool slept = false;

    while (cpu_thread_is_idle(cpu)) {
        if (!slept) {
            slept = true;
            qemu_plugin_vcpu_idle_cb(cpu);
        }
        qemu_cond_wait(cpu->halt_cond, &qemu_global_mutex);
    }
    if (slept) {
        qemu_plugin_vcpu_resume_cb(cpu);
    }

    qemu_wait_io_event_common(cpu);
}

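/*
 * Wake up a vCPU thread that is running guest code: on POSIX hosts by
 * sending it SIG_IPI, on Windows by posting its semaphore.  The kick is
 * skipped if a previous kick has not been consumed yet.
 */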
void cpus_kick_thread(CPUState *cpu)
{
    if (cpu->thread_kicked) {
        return;
    }
    cpu->thread_kicked = true;

#ifndef _WIN32
    int err = pthread_kill(cpu->thread->thread, SIG_IPI);
    if (err && err != ESRCH) {
        fprintf(stderr, "qemu:%s: %s", __func__, strerror(err));
        exit(1);
    }
#else
    qemu_sem_post(&cpu->sem);
#endif
}

void qemu_cpu_kick(CPUState *cpu)
{
    qemu_cond_broadcast(cpu->halt_cond);
    if (cpus_accel->kick_vcpu_thread) {
        cpus_accel->kick_vcpu_thread(cpu);
    } else { /* default */
        cpus_kick_thread(cpu);
    }
}

void qemu_cpu_kick_self(void)
{
    assert(current_cpu);
    cpus_kick_thread(current_cpu);
}

bool qemu_cpu_is_self(CPUState *cpu)
{
    return qemu_thread_is_self(cpu->thread);
}

bool qemu_in_vcpu_thread(void)
{
    return current_cpu && qemu_cpu_is_self(current_cpu);
}

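/* Per-thread flag (coroutine-safe TLS) tracking whether the BQL is held. */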
QEMU_DEFINE_STATIC_CO_TLS(bool, iothread_locked)

bool qemu_mutex_iothread_locked(void)
{
    return get_iothread_locked();
}

bool qemu_in_main_thread(void)
{
    return qemu_mutex_iothread_locked();
}

/*
 * The BQL is taken from so many places that it is worth profiling the
 * callers directly, instead of funneling them all through a single function.
 */
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
    QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);

    g_assert(!qemu_mutex_iothread_locked());
    bql_lock(&qemu_global_mutex, file, line);
    set_iothread_locked(true);
}

void qemu_mutex_unlock_iothread(void)
{
    g_assert(qemu_mutex_iothread_locked());
    set_iothread_locked(false);
    qemu_mutex_unlock(&qemu_global_mutex);
}

void qemu_cond_wait_iothread(QemuCond *cond)
{
    qemu_cond_wait(cond, &qemu_global_mutex);
}

void qemu_cond_timedwait_iothread(QemuCond *cond, int ms)
{
    qemu_cond_timedwait(cond, &qemu_global_mutex, ms);
}

/* signal CPU creation */
void cpu_thread_signal_created(CPUState *cpu)
{
    cpu->created = true;
    qemu_cond_signal(&qemu_cpu_cond);
}

/* signal CPU destruction */
void cpu_thread_signal_destroyed(CPUState *cpu)
{
    cpu->created = false;
    qemu_cond_signal(&qemu_cpu_cond);
}


static bool all_vcpus_paused(void)
{
    CPUState *cpu;

    CPU_FOREACH(cpu) {
        if (!cpu->stopped) {
            return false;
        }
    }

    return true;
}

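/*
 * Ask every vCPU to stop and wait until all of them have done so.  The
 * virtual clock is disabled first, and the replay lock is dropped while
 * waiting so that woken vCPU threads can finish their replay work.
 */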
void pause_all_vcpus(void)
{
    CPUState *cpu;

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, false);
    CPU_FOREACH(cpu) {
        if (qemu_cpu_is_self(cpu)) {
            qemu_cpu_stop(cpu, true);
        } else {
            cpu->stop = true;
            qemu_cpu_kick(cpu);
        }
    }

    /* We need to drop the replay_lock so any vCPU threads woken up
     * can finish their replay tasks
     */
    replay_mutex_unlock();

    while (!all_vcpus_paused()) {
        qemu_cond_wait(&qemu_pause_cond, &qemu_global_mutex);
        CPU_FOREACH(cpu) {
            qemu_cpu_kick(cpu);
        }
    }

    qemu_mutex_unlock_iothread();
    replay_mutex_lock();
    qemu_mutex_lock_iothread();
}

void cpu_resume(CPUState *cpu)
{
    cpu->stop = false;
    cpu->stopped = false;
    qemu_cpu_kick(cpu);
}

void resume_all_vcpus(void)
{
    CPUState *cpu;

    if (!runstate_is_running()) {
        return;
    }

    qemu_clock_enable(QEMU_CLOCK_VIRTUAL, true);
    CPU_FOREACH(cpu) {
        cpu_resume(cpu);
    }
}

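/*
 * Unplug a vCPU: request that it stop, then join its thread.  The BQL is
 * released around qemu_thread_join() so the vCPU thread can exit cleanly.
 */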
void cpu_remove_sync(CPUState *cpu)
{
    cpu->stop = true;
    cpu->unplug = true;
    qemu_cpu_kick(cpu);
    qemu_mutex_unlock_iothread();
    qemu_thread_join(cpu->thread);
    qemu_mutex_lock_iothread();
}

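/*
 * Called by the accelerator during its startup to install its
 * AccelOpsClass; create_vcpu_thread is the only mandatory hook.
 */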
void cpus_register_accel(const AccelOpsClass *ops)
{
    assert(ops != NULL);
    assert(ops->create_vcpu_thread != NULL); /* mandatory */
    cpus_accel = ops;
}

const AccelOpsClass *cpus_get_accel(void)
{
    /* broken if we call this early */
    assert(cpus_accel);
    return cpus_accel;
}

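/*
 * Bring up a new vCPU: give it a default address space if the target did
 * not set one up, hand it to the accelerator's create_vcpu_thread hook and
 * wait until the vCPU thread signals that it has been created.
 */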
void qemu_init_vcpu(CPUState *cpu)
{
    MachineState *ms = MACHINE(qdev_get_machine());

    cpu->nr_cores = ms->smp.cores;
    cpu->nr_threads = ms->smp.threads;
    cpu->stopped = true;
    cpu->random_seed = qemu_guest_random_seed_thread_part1();

    if (!cpu->as) {
        /* If the target cpu hasn't set up any address spaces itself,
         * give it the default one.
         */
        cpu->num_ases = 1;
        cpu_address_space_init(cpu, 0, "cpu-memory", cpu->memory);
    }

    /* accelerators all implement the AccelOpsClass */
    g_assert(cpus_accel != NULL && cpus_accel->create_vcpu_thread != NULL);
    cpus_accel->create_vcpu_thread(cpu);

    while (!cpu->created) {
        qemu_cond_wait(&qemu_cpu_cond, &qemu_global_mutex);
    }
}

void cpu_stop_current(void)
{
    if (current_cpu) {
        current_cpu->stop = true;
        cpu_exit(current_cpu);
    }
}

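/*
 * Stop the VM in the given run state.  When called from a vCPU thread the
 * stop is only requested and completed later by the main loop; otherwise
 * the VM is stopped synchronously via do_vm_stop().
 */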
int vm_stop(RunState state)
{
    if (qemu_in_vcpu_thread()) {
        qemu_system_vmstop_request_prepare();
        qemu_system_vmstop_request(state);
        /*
         * FIXME: should not return to device code in case
         * vm_stop() has been requested.
         */
        cpu_stop_current();
        return 0;
    }

    return do_vm_stop(state, true);
}

/**
 * Prepare for (re)starting the VM.
 * Returns -1 if the vCPUs are not to be restarted (e.g. if they are already
 * running or in case of an error condition), 0 otherwise.
 */
int vm_prepare_start(bool step_pending)
{
    RunState requested;

    qemu_vmstop_requested(&requested);
    if (runstate_is_running() && requested == RUN_STATE__MAX) {
        return -1;
    }

    /* Ensure that a STOP/RESUME pair of events is emitted if a
     * vmstop request was pending.  The BLOCK_IO_ERROR event, for
     * example, according to documentation is always followed by
     * the STOP event.
     */
    if (runstate_is_running()) {
        qapi_event_send_stop();
        qapi_event_send_resume();
        return -1;
    }

    /*
     * WHPX accelerator needs to know whether we are going to step
     * any CPUs, before starting the first one.
     */
    if (cpus_accel->synchronize_pre_resume) {
        cpus_accel->synchronize_pre_resume(step_pending);
    }

    /* We are sending this now, but the CPUs will be resumed shortly later */
    qapi_event_send_resume();

    cpu_enable_ticks();
    runstate_set(RUN_STATE_RUNNING);
    vm_state_notify(1, RUN_STATE_RUNNING);
    return 0;
}

void vm_start(void)
{
    if (!vm_prepare_start(false)) {
        resume_all_vcpus();
    }
}

/* Does the state transition even if the VM is already stopped;
   the current state is forgotten forever. */
int vm_stop_force_state(RunState state)
{
    if (runstate_is_running()) {
        return vm_stop(state);
    } else {
        int ret;
        runstate_set(state);

        bdrv_drain_all();
        /* Make sure to return an error if the flush in a previous vm_stop()
         * failed. */
        ret = bdrv_flush_all();
        trace_vm_stop_flush_all(ret);
        return ret;
    }
}

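/*
 * QMP 'memsave' command: write 'size' bytes of guest virtual memory,
 * starting at 'addr' and translated through the given vCPU, to 'filename'.
 */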
void qmp_memsave(int64_t addr, int64_t size, const char *filename,
                 bool has_cpu, int64_t cpu_index, Error **errp)
{
    FILE *f;
    uint32_t l;
    CPUState *cpu;
    uint8_t buf[1024];
    int64_t orig_addr = addr, orig_size = size;

    if (!has_cpu) {
        cpu_index = 0;
    }

    cpu = qemu_get_cpu(cpu_index);
    if (cpu == NULL) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cpu-index",
                   "a CPU number");
        return;
    }

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        if (cpu_memory_rw_debug(cpu, addr, buf, l, 0) != 0) {
            error_setg(errp, "Invalid addr 0x%016" PRIx64 "/size %" PRId64
                             " specified", orig_addr, orig_size);
            goto exit;
        }
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

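/*
 * QMP 'pmemsave' command: write 'size' bytes of guest physical memory,
 * starting at 'addr', to 'filename'.
 */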
void qmp_pmemsave(int64_t addr, int64_t size, const char *filename,
                  Error **errp)
{
    FILE *f;
    uint32_t l;
    uint8_t buf[1024];

    f = fopen(filename, "wb");
    if (!f) {
        error_setg_file_open(errp, errno, filename);
        return;
    }

    while (size != 0) {
        l = sizeof(buf);
        if (l > size) {
            l = size;
        }
        cpu_physical_memory_read(addr, buf, l);
        if (fwrite(buf, 1, l, f) != l) {
            error_setg(errp, QERR_IO_ERROR);
            goto exit;
        }
        addr += l;
        size -= l;
    }

exit:
    fclose(f);
}

void qmp_inject_nmi(Error **errp)
{
    nmi_monitor_handle(monitor_get_cpu_index(monitor_cur()), errp);
}
