/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

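/*
 * Illustrative sketch of the typical bottom-half lifecycle (not part of this
 * file's logic; my_cb and s are placeholder names):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         MyState *s = opaque;     // runs in the AioContext's thread
 *         ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, s);  // created, not yet scheduled
 *     qemu_bh_schedule(bh);       // my_cb runs on the next aio_bh_poll()
 *     ...
 *     qemu_bh_delete(bh);         // freed later, by aio_bh_poll()
 */
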
/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
    MemReentrancyGuard *reentrancy_guard;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay.
     * vCPU execution should be suspended when a new BH is scheduled.
     * This avoids guest timeouts caused by excessively long
     * execution slices.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

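/*
 * Example (hypothetical caller): a one-shot BH is handy for deferring work to
 * another context's thread without managing the QEMUBH lifetime by hand:
 *
 *     aio_bh_schedule_oneshot(target_ctx, my_cb, my_opaque);
 *
 * The BH is freed automatically after my_cb runs, so no qemu_bh_delete()
 * call is needed.
 */
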
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
        .reentrancy_guard = reentrancy_guard,
    };
    return bh;
}

void aio_bh_call(QEMUBH *bh)
{
    bool last_engaged_in_io = false;

    /* Make a copy of the guard pointer, as cb may free the bh */
    MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
    if (reentrancy_guard) {
        last_engaged_in_io = reentrancy_guard->engaged_in_io;
        if (reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        reentrancy_guard->engaged_in_io = true;
    }

    bh->cb(bh->opaque);

    if (reentrancy_guard) {
        reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
}

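/*
 * Example (sketch): device code that wants this reentrancy protection passes
 * a guard when creating the BH, e.g. via the qemu_bh_new_guarded() helper:
 *
 *     s->bh = qemu_bh_new_guarded(my_cb, s,
 *                                 &DEVICE(s)->mem_reentrancy_guard);
 *
 * While my_cb runs, engaged_in_io stays true, so re-entry into I/O code
 * through the same guard is detected and traced by aio_bh_call() above.
 */
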
/* aio_bh_poll() must not be called concurrently with itself. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/* This function is asynchronous: a callback that has already started
 * executing is not interrupted.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/* This function is asynchronous: the bottom half is only freed later, when
 * aio_bh_poll() dequeues it.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

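/*
 * Example (sketch, assuming a caller that owns a GLib main loop): the
 * AioContext can be driven by GLib instead of aio_poll():
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, g_main_context_default());
 *     g_source_unref(src);   // the main context now holds a reference
 *
 * aio_ctx_prepare/check/dispatch above then run as part of the GLib loop.
 */
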
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}

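/*
 * Summary of the notify/accept pairing, mirroring the barrier comments above
 * (a simplified producer/consumer sketch, not additional synchronization):
 *
 *     producer (aio_notify)        consumer (event loop)
 *     ---------------------        -----------------------------------
 *     write bh->flags etc.         notified = false   (aio_notify_accept)
 *     smp_wmb()                    smp_mb()
 *     notified = true              read bh->flags etc.
 *
 * A consumer that clears ctx->notified is therefore guaranteed to observe
 * the work queued before the corresponding aio_notify().
 */
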
static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

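/*
 * Example (sketch of a minimal standalone event loop; using &error_fatal is
 * just one possible error-handling policy):
 *
 *     AioContext *ctx = aio_context_new(&error_fatal);
 *     qemu_set_current_aio_context(ctx);
 *     for (;;) {
 *         aio_poll(ctx, true);  // blocks until BHs, timers or fds fire
 *     }
 *
 * iothread.c follows roughly this shape for its worker threads.
 */
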
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

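/*
 * Example (sketch; my_co_fn is a placeholder coroutine_fn): handing a fresh
 * coroutine to another AioContext's thread:
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_opaque);
 *     aio_co_schedule(target_ctx, co);   // runs in target_ctx's thread
 *
 * Scheduling the same coroutine twice before it runs aborts, as enforced by
 * the qatomic_cmpxchg() check above.
 */
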
typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

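/*
 * Example (sketch): a coroutine_fn can temporarily hop to the main loop and
 * back, e.g. to touch state that must only change in the main context:
 *
 *     aio_co_reschedule_self(qemu_get_aio_context());
 *     ...                       // now running in the main AioContext
 *     aio_co_reschedule_self(my_iothread_ctx);   // placeholder ctx; hop back
 */
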
void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

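/*
 * Example (sketch): aio_co_wake() is the usual way for an fd handler to
 * resume a coroutine that yielded while waiting for I/O:
 *
 *     static void my_read_ready(void *opaque)   // placeholder fd handler
 *     {
 *         MyConn *conn = opaque;
 *         aio_co_wake(conn->co);  // resumes in the coroutine's own context
 *     }
 *
 * The coroutine itself simply calls qemu_coroutine_yield() after arming the
 * handler.
 */
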
void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

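/*
 * Example (sketch): code running outside the context's home thread
 * traditionally brackets access with acquire/release; the recursive mutex
 * also tolerates nested acquisition from callbacks:
 *
 *     aio_context_acquire(ctx);
 *     ...                       // safe to touch ctx-affine state here
 *     aio_context_release(ctx);
 */
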
QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread.  */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
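
/*
 * Example (sketch): callers typically forward the iothread properties
 * thread-pool-min/thread-pool-max, e.g.:
 *
 *     Error *local_err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 2, 16, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */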