accel/tcg: Restrict TCGCPUOps::cpu_exec_interrupt() to sysemu
All targets call TCGCPUOps::cpu_exec_interrupt() from sysemu code.
Move its declaration into the CONFIG_SOFTMMU section of the structure
to restrict it to system emulation, and extend the code guarded by
'#ifndef CONFIG_USER_ONLY' in cpu_handle_interrupt() accordingly.
Also restrict the static inline need_replay_interrupt() helper to
sysemu, to avoid a "defined but not used" warning.
Signed-off-by: Philippe Mathieu-Daudé <f4bug@amsat.org>
Reviewed-by: Richard Henderson <richard.henderson@linaro.org>
Message-Id: <20210911165434.531552-24-f4bug@amsat.org>
Signed-off-by: Richard Henderson <richard.henderson@linaro.org>
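For illustration, a minimal sketch of what a target's TCGCPUOps
initializer looks like once cpu_exec_interrupt() is sysemu-only
("foo" is a hypothetical target name, not part of this patch; only
the field names come from the structure touched below):

    /* Sketch only: "foo" stands in for a real target. */
    static const struct TCGCPUOps foo_tcg_ops = {
        /* Hooks available in both user-mode and system emulation. */
        .tlb_fill = foo_cpu_tlb_fill,
    #ifndef CONFIG_USER_ONLY
        /* Sysemu-only hooks; cpu_exec_interrupt now lives here. */
        .cpu_exec_interrupt = foo_cpu_exec_interrupt,
        .do_interrupt = foo_cpu_do_interrupt,
    #endif /* !CONFIG_USER_ONLY */
    };

A user-mode-only build that assigned .cpu_exec_interrupt outside such
a guard would now fail to compile, which is the point of the
restriction.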
diff --git a/accel/tcg/cpu-exec.c b/accel/tcg/cpu-exec.c
index 2838177..75dbc1e 100644
--- a/accel/tcg/cpu-exec.c
+++ b/accel/tcg/cpu-exec.c
@@ -685,6 +685,7 @@
return false;
}
+#ifndef CONFIG_USER_ONLY
/*
* CPU_INTERRUPT_POLL is a virtual event which gets converted into a
* "real" interrupt event later. It does not need to be recorded for
@@ -698,12 +699,11 @@
return true;
#endif
}
+#endif /* !CONFIG_USER_ONLY */
static inline bool cpu_handle_interrupt(CPUState *cpu,
TranslationBlock **last_tb)
{
- CPUClass *cc = CPU_GET_CLASS(cpu);
-
/* Clear the interrupt flag now since we're processing
* cpu->interrupt_request and cpu->exit_request.
* Ensure zeroing happens before reading cpu->exit_request or
@@ -725,6 +725,7 @@
qemu_mutex_unlock_iothread();
return true;
}
+#if !defined(CONFIG_USER_ONLY)
if (replay_mode == REPLAY_MODE_PLAY && !replay_has_interrupt()) {
/* Do nothing */
} else if (interrupt_request & CPU_INTERRUPT_HALT) {
@@ -753,12 +754,14 @@
qemu_mutex_unlock_iothread();
return true;
}
-#endif
+#endif /* !TARGET_I386 */
/* The target hook has 3 exit conditions:
False when the interrupt isn't processed,
True when it is, and we should restart on a new TB,
and via longjmp via cpu_loop_exit. */
else {
+ CPUClass *cc = CPU_GET_CLASS(cpu);
+
if (cc->tcg_ops->cpu_exec_interrupt &&
cc->tcg_ops->cpu_exec_interrupt(cpu, interrupt_request)) {
if (need_replay_interrupt(interrupt_request)) {
@@ -777,6 +780,7 @@
* reload the 'interrupt_request' value */
interrupt_request = cpu->interrupt_request;
}
+#endif /* !CONFIG_USER_ONLY */
if (interrupt_request & CPU_INTERRUPT_EXITTB) {
cpu->interrupt_request &= ~CPU_INTERRUPT_EXITTB;
/* ensure that no TB jump will be modified as
diff --git a/include/hw/core/tcg-cpu-ops.h b/include/hw/core/tcg-cpu-ops.h
index 6c7ab96..55123cb 100644
--- a/include/hw/core/tcg-cpu-ops.h
+++ b/include/hw/core/tcg-cpu-ops.h
@@ -35,8 +35,6 @@
void (*cpu_exec_enter)(CPUState *cpu);
/** @cpu_exec_exit: Callback for cpu_exec cleanup */
void (*cpu_exec_exit)(CPUState *cpu);
- /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
- bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/**
* @tlb_fill: Handle a softmmu tlb miss or user-only address fault
*
@@ -68,6 +66,8 @@
void (*do_interrupt)(CPUState *cpu);
#endif /* !CONFIG_USER_ONLY || !TARGET_I386 */
#ifdef CONFIG_SOFTMMU
+ /** @cpu_exec_interrupt: Callback for processing interrupts in cpu_exec */
+ bool (*cpu_exec_interrupt)(CPUState *cpu, int interrupt_request);
/**
* @do_transaction_failed: Callback for handling failed memory transactions
* (ie bus faults or external aborts; not MMU faults)