cpu: introduce cpu_in_exclusive_context()
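
Add a helper to ask whether a vCPU is currently running in an
exclusive context, i.e. between start_exclusive() and end_exclusive(),
for example while running work queued via async_safe_run_on_cpu().
The state is tracked in a new CPUState.in_exclusive_context flag that
start_exclusive()/end_exclusive() set and clear themselves, which lets
cpu_exec_step_atomic() drop its sigsetjmp-volatile local
in_exclusive_region and query the CPU directly.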
Suggested-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Alex Bennée <alex.bennee@linaro.org>
Signed-off-by: Emilio G. Cota <cota@braap.org>
[AJB: moved inside start/end_exclusive fns + cleanup]
Signed-off-by: Alex Bennée <alex.bennee@linaro.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
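
As a usage illustration (not part of this patch; the work function and
its queuing site below are hypothetical), anything dispatched through
async_safe_run_on_cpu() runs between start_exclusive() and
end_exclusive(), so the new predicate holds inside the work function:

    /* Hypothetical example: safe work runs in an exclusive section,
     * with every other vCPU stopped, so the predicate is true here. */
    static void do_safe_work(CPUState *cpu, run_on_cpu_data data)
    {
        g_assert(cpu_in_exclusive_context(cpu));
        /* ... mutate state no other vCPU may observe mid-update ... */
    }

    static void queue_safe_work(CPUState *cpu)
    {
        async_safe_run_on_cpu(cpu, do_safe_work, RUN_ON_CPU_NULL);
    }
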
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 48272c7..81c33d6 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -238,8 +238,6 @@
uint32_t flags;
uint32_t cflags = 1;
uint32_t cf_mask = cflags & CF_HASH_MASK;
- /* volatile because we modify it between setjmp and longjmp */
- volatile bool in_exclusive_region = false;

if (sigsetjmp(cpu->jmp_env, 0) == 0) {
tb = tb_lookup__cpu_state(cpu, &pc, &cs_base, &flags, cf_mask);
@@ -253,7 +251,6 @@

/* Since we got here, we know that parallel_cpus must be true. */
parallel_cpus = false;
- in_exclusive_region = true;
cc->cpu_exec_enter(cpu);
/* execute the generated code */
trace_exec_tb(tb, pc);
@@ -273,7 +270,7 @@
assert_no_pages_locked();
}

- if (in_exclusive_region) {
+ if (cpu_in_exclusive_context(cpu)) {
/* We might longjmp out of either the codegen or the
 * execution, so we must make sure we only end the exclusive
 * region if we started it.
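
A simplified sketch of the control flow this hunk changes (surrounding
details elided; not a verbatim excerpt): after a siglongjmp back to the
sigsetjmp point, a non-volatile local written in between has an
indeterminate value, whereas the per-CPU flag set by start_exclusive()
survives the jump, so the unwind path can query it directly:

    if (sigsetjmp(cpu->jmp_env, 0) == 0) {
        start_exclusive();   /* sets cpu->in_exclusive_context */
        /* codegen and execution, either of which may siglongjmp out */
    }
    /* Reached on normal exit and after a longjmp alike. */
    if (cpu_in_exclusive_context(cpu)) {
        parallel_cpus = true;
        end_exclusive();     /* clears cpu->in_exclusive_context */
    }
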
diff --git a/cpus-common.c b/cpus-common.c
index af3385a..eaf590c 100644
--- a/cpus-common.c
+++ b/cpus-common.c
@@ -200,11 +200,15 @@
* section until end_exclusive resets pending_cpus to 0.
*/
qemu_mutex_unlock(&qemu_cpu_list_lock);
+
+ current_cpu->in_exclusive_context = true;
}

/* Finish an exclusive operation. */
void end_exclusive(void)
{
+ current_cpu->in_exclusive_context = false;
+
qemu_mutex_lock(&qemu_cpu_list_lock);
atomic_set(&pending_cpus, 0);
qemu_cond_broadcast(&exclusive_resume);
diff --git a/include/hw/core/cpu.h b/include/hw/core/cpu.h
index 031f587..07f2ab0 100644
--- a/include/hw/core/cpu.h
+++ b/include/hw/core/cpu.h
@@ -372,6 +372,7 @@
bool unplug;
bool crash_occurred;
bool exit_request;
+ bool in_exclusive_context;
uint32_t cflags_next_tb;
/* updates protected by BQL */
uint32_t interrupt_request;
@@ -784,6 +785,18 @@
void async_safe_run_on_cpu(CPUState *cpu, run_on_cpu_func func, run_on_cpu_data data);

/**
+ * cpu_in_exclusive_context()
+ * @cpu: The vCPU to check
+ *
+ * Returns true if @cpu is in an exclusive context, for example running
+ * something which has previously been queued via async_safe_run_on_cpu().
+ */
+static inline bool cpu_in_exclusive_context(const CPUState *cpu)
+{
+ return cpu->in_exclusive_context;
+}
+
+/**
* qemu_get_cpu:
* @index: The CPUState@cpu_index value of the CPU to obtain.
*