/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};
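
/*
 * Exposition (not part of the original file): a typical scheduled BH moves
 * through the flags above roughly as follows.
 *
 *   qemu_bh_schedule(bh);   // aio_bh_enqueue() ORs in BH_PENDING|BH_SCHEDULED
 *                           // and calls aio_notify()
 *   aio_bh_poll(ctx);       // aio_bh_dequeue() clears BH_PENDING, BH_SCHEDULED
 *                           // and BH_IDLE, then the callback runs
 *   qemu_bh_delete(bh);     // sets BH_DELETED; the BH is freed by a later
 *                           // aio_bh_poll() (or by aio_ctx_finalize())
 */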

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * The memory barrier implicit in qatomic_fetch_or makes sure that:
     * 1. the BH_IDLE flag and any writes needed by the callback are done
     *    before the flags are read in aio_bh_poll().
     * 2. ctx is loaded before the callback has a chance to execute and bh
     *    could be freed.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);
    if (!(old_flags & BH_PENDING)) {
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * The qatomic_and is paired with aio_bh_enqueue().  The implicit memory
     * barrier ensures that the callback sees all writes done by the scheduling
     * thread.  It also ensures that the scheduling thread sees the cleared
     * flag before bh->cb has run, and thus will call aio_notify again if
     * necessary.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    return bh;
}
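
/*
 * Usage sketch (illustrative only; my_cb and my_state are hypothetical
 * names, not part of QEMU):
 *
 *   static void my_cb(void *opaque)
 *   {
 *       MyState *s = opaque;    // runs in ctx's home event loop thread
 *       ...
 *   }
 *
 *   QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *   qemu_bh_schedule(bh);       // safe to call from any thread
 *   ...
 *   qemu_bh_delete(bh);         // actual g_free() happens in aio_bh_poll()
 */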

void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Concurrent calls to aio_bh_poll() are not allowed */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}
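
/*
 * Why the on-stack BHListSlice above (explanatory note, implied by the
 * code): aio_bh_call() can re-enter aio_bh_poll() through a nested event
 * loop. Each nesting level snapshots ctx->bh_list into its own slice, so
 * BHs scheduled after the snapshot are not run by the nested call.
 */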

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/*
 * This function is asynchronous: it only clears BH_SCHEDULED, so a callback
 * that has already been dequeued by aio_bh_poll() may still run.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/*
 * This function is asynchronous: the bottom half is not freed here but by a
 * later aio_bh_poll() (or by aio_ctx_finalize()) at the final end.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}
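
/*
 * Summary (added for clarity): the timeout convention used below is -1 to
 * block indefinitely, 0 to avoid blocking, and otherwise a deadline in
 * nanoseconds; aio_ctx_prepare() later converts it to milliseconds with
 * qemu_timeout_ns_to_ms().
 */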
int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag.  */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(flags & BH_DELETED);

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};
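
/*
 * These callbacks let an AioContext be driven by a plain GLib main loop.
 * Illustrative caller sketch (loop setup elided):
 *
 *   GSource *source = aio_get_g_source(ctx);
 *   g_source_attach(source, g_main_context_default());
 *   g_source_unref(source);
 *   ...
 *   // g_main_context_iteration() now invokes the prepare/check/dispatch
 *   // functions above on ctx's behalf
 */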

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}
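
/*
 * Usage sketch (illustrative; thread_pool_submit_aio() is the worker-pool
 * entry point declared in block/thread-pool.h, and my_blocking_fn /
 * my_completion are hypothetical):
 *
 *   ThreadPool *pool = aio_get_thread_pool(ctx);
 *   thread_pool_submit_aio(pool, my_blocking_fn, arg,
 *                          my_completion, opaque);
 *   // my_blocking_fn runs in a worker thread; my_completion runs back in
 *   // ctx via the pool's completion bottom half
 */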

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. bh->flags before writing ctx->notified.  Pairs with smp_mb
     * in aio_notify_accept.
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified before reading ctx->notify_me.  Pairs
     * with smp_mb in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Write ctx->notified before reading e.g. bh->flags.  Pairs with smp_wmb
     * in aio_notify.
     */
    smp_mb();
}
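
/*
 * Exposition: the barrier pairing between aio_notify() and the event loop
 * looks roughly like this (columns run concurrently):
 *
 *   waker thread                         event loop thread
 *   ------------                         -----------------
 *   write bh->flags                      notify_me |= 1
 *   smp_wmb()                            smp_mb()
 *   notified = true                      compute timeout (reads bh->flags)
 *   smp_mb()                             notify_me &= ~1; aio_notify_accept()
 *   if (notify_me) kick event notifier
 *
 * Either the waker sees notify_me set and kicks the event notifier, or the
 * event loop sees the new work while computing its timeout; the wakeup
 * cannot be lost.
 */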

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    return qatomic_read(&ctx->notified);
}

static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    /*
     * scheduled_coroutines is a LIFO list (aio_co_schedule() prepends), so
     * reverse it first to enter the coroutines in FIFO order.
     */
    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}
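
/*
 * Usage sketch (illustrative; error handling shortened):
 *
 *   Error *local_err = NULL;
 *   AioContext *ctx = aio_context_new(&local_err);
 *   if (!ctx) {
 *       error_report_err(local_err);
 *       return;
 *   }
 *   ...
 *   aio_context_unref(ctx);   // drops the GSource reference taken at creation
 */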

void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}
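
/*
 * Usage sketch (illustrative, inside a coroutine_fn; iothread_ctx is a
 * hypothetical AioContext owned by another thread):
 *
 *   void coroutine_fn my_co_fn(void *opaque)
 *   {
 *       aio_co_reschedule_self(iothread_ctx);
 *       // ... now running in iothread_ctx's event loop ...
 *       aio_co_reschedule_self(qemu_get_aio_context());
 *       // ... back in the main loop thread ...
 *   }
 */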

void aio_co_wake(struct Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, struct Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        /* Not in ctx's thread: defer to ctx's event loop */
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        /* Queue co to be entered when the current coroutine yields */
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        /* In ctx's thread but outside coroutine context: enter directly */
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}