qemu/atomic.h: rename atomic_ to qatomic_
clang's C11 atomic_fetch_*() functions only take a C11 atomic type
pointer argument. QEMU uses plain types (int, etc.) and this causes a
compiler error when QEMU code calls these functions in a source file
that also includes <stdatomic.h> via a system header file:
$ CC=clang CXX=clang++ ./configure ... && make
../util/async.c:79:17: error: address argument to atomic operation must be a pointer to _Atomic type ('unsigned int *' invalid)
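As an illustration (a standalone sketch, not part of this patch), the same
diagnostic can be reproduced with a plain object passed to one of the
<stdatomic.h> generic macros:

/* repro.c -- illustrative example only */
#include <stdatomic.h>

static unsigned int plain_counter;          /* plain type, as QEMU uses */
static _Atomic unsigned int atomic_counter; /* what clang's macro expects */

int main(void)
{
    /* clang rejects the next call with "address argument to atomic
     * operation must be a pointer to _Atomic type": */
    /* atomic_fetch_add(&plain_counter, 1); */

    atomic_fetch_add(&atomic_counter, 1);   /* accepted */
    return 0;
}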
Avoid using atomic_*() names in QEMU's atomic.h since that namespace is
used by <stdatomic.h>. Prefix QEMU's APIs with 'q' so that atomic.h
and <stdatomic.h> can co-exist. I checked /usr/include on my machine and
searched GitHub for existing "qatomic_" users but there seem to be none.
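The resulting situation, as a simplified sketch (these are stand-ins, not the
actual include/qemu/atomic.h definitions): the qatomic_*() macros wrap the
compiler's __atomic builtins, which operate on plain objects, so they neither
collide with nor depend on the <stdatomic.h> names:

#include <stdatomic.h>   /* e.g. pulled in indirectly by a system header */

/* simplified stand-ins for the real macros */
#define qatomic_read(ptr)      __atomic_load_n(ptr, __ATOMIC_RELAXED)
#define qatomic_set(ptr, val)  __atomic_store_n(ptr, val, __ATOMIC_RELAXED)
#define qatomic_xchg(ptr, val) __atomic_exchange_n(ptr, val, __ATOMIC_SEQ_CST)

static unsigned int flag;    /* plain type, no _Atomic qualifier needed */

static unsigned int kick(void)
{
    unsigned int old = qatomic_xchg(&flag, 1);   /* works on the plain int */
    qatomic_set(&flag, 0);
    return old + qatomic_read(&flag);
}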
This patch was generated using:
$ git grep -h -o '\<atomic\(64\)\?_[a-z0-9_]\+' include/qemu/atomic.h | \
sort -u >/tmp/changed_identifiers
$ for identifier in $(</tmp/changed_identifiers); do
sed -i "s%\<$identifier\>%q$identifier%g" \
$(git grep -I -l "\<$identifier\>")
done
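For illustration, one iteration of that loop (taking atomic_read as the
identifier) expands to:

$ sed -i "s%\<atomic_read\>%qatomic_read%g" \
      $(git grep -I -l "\<atomic_read\>")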
I manually fixed line-wrap issues and misaligned rST tables.
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Philippe Mathieu-Daudé <philmd@redhat.com>
Acked-by: Paolo Bonzini <pbonzini@redhat.com>
Message-Id: <20200923105646.47864-1-stefanha@redhat.com>
diff --git a/softmmu/cpu-throttle.c b/softmmu/cpu-throttle.c
index 4e6b281..2ec4b8e 100644
--- a/softmmu/cpu-throttle.c
+++ b/softmmu/cpu-throttle.c
@@ -64,7 +64,7 @@
}
sleeptime_ns = endtime_ns - qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
}
- atomic_set(&cpu->throttle_thread_scheduled, 0);
+ qatomic_set(&cpu->throttle_thread_scheduled, 0);
}
static void cpu_throttle_timer_tick(void *opaque)
@@ -77,7 +77,7 @@
return;
}
CPU_FOREACH(cpu) {
- if (!atomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
+ if (!qatomic_xchg(&cpu->throttle_thread_scheduled, 1)) {
async_run_on_cpu(cpu, cpu_throttle_thread,
RUN_ON_CPU_NULL);
}
@@ -94,7 +94,7 @@
new_throttle_pct = MIN(new_throttle_pct, CPU_THROTTLE_PCT_MAX);
new_throttle_pct = MAX(new_throttle_pct, CPU_THROTTLE_PCT_MIN);
- atomic_set(&throttle_percentage, new_throttle_pct);
+ qatomic_set(&throttle_percentage, new_throttle_pct);
timer_mod(throttle_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL_RT) +
CPU_THROTTLE_TIMESLICE_NS);
@@ -102,7 +102,7 @@
void cpu_throttle_stop(void)
{
- atomic_set(&throttle_percentage, 0);
+ qatomic_set(&throttle_percentage, 0);
}
bool cpu_throttle_active(void)
@@ -112,7 +112,7 @@
int cpu_throttle_get_percentage(void)
{
- return atomic_read(&throttle_percentage);
+ return qatomic_read(&throttle_percentage);
}
void cpu_throttle_init(void)
diff --git a/softmmu/cpus.c b/softmmu/cpus.c
index e3b9806..ac8940d 100644
--- a/softmmu/cpus.c
+++ b/softmmu/cpus.c
@@ -192,7 +192,7 @@
int64_t executed = cpu_get_icount_executed(cpu);
cpu->icount_budget -= executed;
- atomic_set_i64(&timers_state.qemu_icount,
+ qatomic_set_i64(&timers_state.qemu_icount,
timers_state.qemu_icount + executed);
}
@@ -223,13 +223,13 @@
cpu_update_icount_locked(cpu);
}
/* The read is protected by the seqlock, but needs atomic64 to avoid UB */
- return atomic_read_i64(&timers_state.qemu_icount);
+ return qatomic_read_i64(&timers_state.qemu_icount);
}
static int64_t cpu_get_icount_locked(void)
{
int64_t icount = cpu_get_icount_raw_locked();
- return atomic_read_i64(&timers_state.qemu_icount_bias) +
+ return qatomic_read_i64(&timers_state.qemu_icount_bias) +
cpu_icount_to_ns(icount);
}
@@ -262,7 +262,7 @@
int64_t cpu_icount_to_ns(int64_t icount)
{
- return icount << atomic_read(&timers_state.icount_time_shift);
+ return icount << qatomic_read(&timers_state.icount_time_shift);
}
static int64_t cpu_get_ticks_locked(void)
@@ -393,18 +393,18 @@
&& last_delta + ICOUNT_WOBBLE < delta * 2
&& timers_state.icount_time_shift > 0) {
/* The guest is getting too far ahead. Slow time down. */
- atomic_set(&timers_state.icount_time_shift,
+ qatomic_set(&timers_state.icount_time_shift,
timers_state.icount_time_shift - 1);
}
if (delta < 0
&& last_delta - ICOUNT_WOBBLE > delta * 2
&& timers_state.icount_time_shift < MAX_ICOUNT_SHIFT) {
/* The guest is getting too far behind. Speed time up. */
- atomic_set(&timers_state.icount_time_shift,
+ qatomic_set(&timers_state.icount_time_shift,
timers_state.icount_time_shift + 1);
}
last_delta = delta;
- atomic_set_i64(&timers_state.qemu_icount_bias,
+ qatomic_set_i64(&timers_state.qemu_icount_bias,
cur_icount - (timers_state.qemu_icount
<< timers_state.icount_time_shift));
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
@@ -428,7 +428,7 @@
static int64_t qemu_icount_round(int64_t count)
{
- int shift = atomic_read(&timers_state.icount_time_shift);
+ int shift = qatomic_read(&timers_state.icount_time_shift);
return (count + (1 << shift) - 1) >> shift;
}
@@ -466,7 +466,7 @@
int64_t delta = clock - cur_icount;
warp_delta = MIN(warp_delta, delta);
}
- atomic_set_i64(&timers_state.qemu_icount_bias,
+ qatomic_set_i64(&timers_state.qemu_icount_bias,
timers_state.qemu_icount_bias + warp_delta);
}
timers_state.vm_clock_warp_start = -1;
@@ -499,7 +499,7 @@
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
- atomic_set_i64(&timers_state.qemu_icount_bias,
+ qatomic_set_i64(&timers_state.qemu_icount_bias,
timers_state.qemu_icount_bias + warp);
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
@@ -583,7 +583,7 @@
*/
seqlock_write_lock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
- atomic_set_i64(&timers_state.qemu_icount_bias,
+ qatomic_set_i64(&timers_state.qemu_icount_bias,
timers_state.qemu_icount_bias + deadline);
seqlock_write_unlock(&timers_state.vm_clock_seqlock,
&timers_state.vm_clock_lock);
@@ -837,11 +837,11 @@
{
CPUState *cpu;
do {
- cpu = atomic_mb_read(&tcg_current_rr_cpu);
+ cpu = qatomic_mb_read(&tcg_current_rr_cpu);
if (cpu) {
cpu_exit(cpu);
}
- } while (cpu != atomic_mb_read(&tcg_current_rr_cpu));
+ } while (cpu != qatomic_mb_read(&tcg_current_rr_cpu));
}
/* Kick all RR vCPUs */
@@ -1110,7 +1110,7 @@
static void qemu_wait_io_event_common(CPUState *cpu)
{
- atomic_mb_set(&cpu->thread_kicked, false);
+ qatomic_mb_set(&cpu->thread_kicked, false);
if (cpu->stop) {
qemu_cpu_stop(cpu, false);
}
@@ -1356,7 +1356,7 @@
ret = cpu_exec(cpu);
cpu_exec_end(cpu);
#ifdef CONFIG_PROFILER
- atomic_set(&tcg_ctx->prof.cpu_exec_time,
+ qatomic_set(&tcg_ctx->prof.cpu_exec_time,
tcg_ctx->prof.cpu_exec_time + profile_getclock() - ti);
#endif
return ret;
@@ -1443,7 +1443,7 @@
while (cpu && cpu_work_list_empty(cpu) && !cpu->exit_request) {
- atomic_mb_set(&tcg_current_rr_cpu, cpu);
+ qatomic_mb_set(&tcg_current_rr_cpu, cpu);
current_cpu = cpu;
qemu_clock_enable(QEMU_CLOCK_VIRTUAL,
@@ -1479,11 +1479,11 @@
cpu = CPU_NEXT(cpu);
} /* while (cpu && !cpu->exit_request).. */
- /* Does not need atomic_mb_set because a spurious wakeup is okay. */
- atomic_set(&tcg_current_rr_cpu, NULL);
+ /* Does not need qatomic_mb_set because a spurious wakeup is okay. */
+ qatomic_set(&tcg_current_rr_cpu, NULL);
if (cpu && cpu->exit_request) {
- atomic_mb_set(&cpu->exit_request, 0);
+ qatomic_mb_set(&cpu->exit_request, 0);
}
if (use_icount && all_cpu_threads_idle()) {
@@ -1687,7 +1687,7 @@
}
}
- atomic_mb_set(&cpu->exit_request, 0);
+ qatomic_mb_set(&cpu->exit_request, 0);
qemu_wait_io_event(cpu);
} while (!cpu->unplug || cpu_can_run(cpu));
@@ -1776,7 +1776,7 @@
*/
void qemu_mutex_lock_iothread_impl(const char *file, int line)
{
- QemuMutexLockFunc bql_lock = atomic_read(&qemu_bql_mutex_lock_func);
+ QemuMutexLockFunc bql_lock = qatomic_read(&qemu_bql_mutex_lock_func);
g_assert(!qemu_mutex_iothread_locked());
bql_lock(&qemu_global_mutex, file, line);
diff --git a/softmmu/memory.c b/softmmu/memory.c
index d030eb6..da5f90f 100644
--- a/softmmu/memory.c
+++ b/softmmu/memory.c
@@ -294,12 +294,12 @@
static bool flatview_ref(FlatView *view)
{
- return atomic_fetch_inc_nonzero(&view->ref) > 0;
+ return qatomic_fetch_inc_nonzero(&view->ref) > 0;
}
void flatview_unref(FlatView *view)
{
- if (atomic_fetch_dec(&view->ref) == 1) {
+ if (qatomic_fetch_dec(&view->ref) == 1) {
trace_flatview_destroy_rcu(view, view->root);
assert(view->root);
call_rcu(view, flatview_destroy, rcu);
@@ -1027,7 +1027,7 @@
}
/* Writes are protected by the BQL. */
- atomic_rcu_set(&as->current_map, new_view);
+ qatomic_rcu_set(&as->current_map, new_view);
if (old_view) {
flatview_unref(old_view);
}
diff --git a/softmmu/vl.c b/softmmu/vl.c
index f7b1034..50d8c2e 100644
--- a/softmmu/vl.c
+++ b/softmmu/vl.c
@@ -1320,7 +1320,7 @@
static int qemu_shutdown_requested(void)
{
- return atomic_xchg(&shutdown_requested, SHUTDOWN_CAUSE_NONE);
+ return qatomic_xchg(&shutdown_requested, SHUTDOWN_CAUSE_NONE);
}
static void qemu_kill_report(void)