/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2. See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine-core.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/lockcnt.h"
#include "qemu/thread.h"
#include "qemu/timer.h"
#include "block/graph-lock.h"
#include "hw/qdev-core.h"


typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;
typedef struct LuringState LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here. This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;

/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call. All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /*
     * Keep track of readers and writers of the block layer graph.
     * This is essential to avoid performing additions and removals
     * of nodes and edges of the block graph while some other thread
     * is traversing it.
     */
138 BdrvGraphRWlock *bdrv_graph;
139
Paolo Bonzini7c690fd2017-01-12 19:07:59 +0100140 /* The list of registered AIO handlers. Protected by ctx->list_lock. */
Stefan Hajnoczi47490792020-02-14 17:17:11 +0000141 AioHandlerList aio_handlers;
142
143 /* The list of AIO handlers to be deleted. Protected by ctx->list_lock. */
144 AioHandlerList deleted_aio_handlers;
Paolo Bonzinia915f4b2012-09-13 12:28:51 +0200145
Paolo Bonzinieabc9772015-07-21 16:07:51 +0200146 /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
Paolo Bonzini3c18a922020-04-07 10:07:45 -0400147 * only written from the AioContext home thread, or under the BQL in
148 * the case of the main AioContext. However, it is read from any
149 * thread so it is still accessed with atomic primitives.
150 *
151 * If this field is 0, everything (file descriptors, bottom halves,
152 * timers) will be re-evaluated before the next blocking poll() or
153 * io_uring wait; therefore, the event_notifier_set call can be
154 * skipped. If it is non-zero, you may need to wake up a concurrent
155 * aio_poll or the glib main event loop, making event_notifier_set
156 * necessary.
Paolo Bonzinieabc9772015-07-21 16:07:51 +0200157 *
158 * Bit 0 is reserved for GSource usage of the AioContext, and is 1
Cao jin54a16a62016-07-15 17:44:18 +0800159 * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
Paolo Bonzinieabc9772015-07-21 16:07:51 +0200160 * Bits 1-31 simply count the number of active calls to aio_poll
161 * that are in the prepare or poll phase.
162 *
163 * The GSource and aio_poll must use a different mechanism because
164 * there is no certainty that a call to GSource's prepare callback
165 * (via g_main_context_prepare) is indeed followed by check and
166 * dispatch. It's not clear whether this would be a bug, but let's
167 * play safe and allow it---it will just cause extra calls to
168 * event_notifier_set until the next call to dispatch.
169 *
170 * Instead, the aio_poll calls include both the prepare and the
171 * dispatch phase, hence a simple counter is enough for them.
Paolo Bonzini0ceb8492014-07-07 15:18:04 +0200172 */
Paolo Bonzinieabc9772015-07-21 16:07:51 +0200173 uint32_t notify_me;
Paolo Bonzini0ceb8492014-07-07 15:18:04 +0200174
    /* A lock to synchronize between QEMUBH and AioHandler adders and deleters,
     * and to ensure that no callbacks are removed while we're walking and
     * dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls. When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events. False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way. For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    int thread_pool_min;
    int thread_pool_max;
    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type. Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* AIO engine parameters */
    int64_t aio_max_batch;  /* maximum number of requests in a batch */

    /*
     * List of handlers participating in userspace polling. Protected by
     * ctx->list_lock. Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented. aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously. It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/**
 * aio_bh_schedule_oneshot_full: Allocate a new bottom half structure that will
 * run only once and as soon as possible.
 *
 * @name: A human-readable identifier for debugging purposes.
 */
void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                                  const char *name);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 *
 * A convenience wrapper for aio_bh_schedule_oneshot_full() that uses cb as the
 * name string.
 */
#define aio_bh_schedule_oneshot(ctx, cb, opaque) \
    aio_bh_schedule_oneshot_full((ctx), (cb), (opaque), (stringify(cb)))

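/*
 * Illustrative usage sketch (not part of the original header; the callback
 * and variable names are hypothetical):
 *
 *     static void my_oneshot_cb(void *opaque)
 *     {
 *         int *counter = opaque;
 *         (*counter)++;
 *     }
 *
 *     // Runs my_oneshot_cb(&counter) exactly once, from ctx's event loop.
 *     aio_bh_schedule_oneshot(ctx, my_oneshot_cb, &counter);
 */
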
/**
 * aio_bh_new_full: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe. The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 *
 * @name: A human-readable identifier for debugging purposes.
 * @reentrancy_guard: A guard set when entering a cb to prevent
 * device-reentrancy issues
 */
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard);

/**
 * aio_bh_new: Allocate a new bottom half structure
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new(ctx, cb, opaque) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), NULL)

/**
 * aio_bh_new_guarded: Allocate a new bottom half structure with a
 * reentrancy_guard
 *
 * A convenience wrapper for aio_bh_new_full() that uses the cb as the name
 * string.
 */
#define aio_bh_new_guarded(ctx, cb, opaque, guard) \
    aio_bh_new_full((ctx), (cb), (opaque), (stringify(cb)), guard)

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop. Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration(). Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting. This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently from
 * multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked. This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet. While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex. This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new. It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed later,
 * once the event loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

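/*
 * Illustrative bottom half lifecycle sketch (not part of the original header;
 * the callback name is hypothetical):
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         // runs from the AioContext's event loop
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, opaque);
 *     qemu_bh_schedule(bh);   // my_bh_cb runs on the next event loop pass
 *     qemu_bh_delete(bh);     // cancels if still pending; freed later
 */
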
/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work. This can issue new pending AIO
 * requests as a result of executing I/O completion or bh callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers. If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking. If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool no_coroutine_fn aio_poll(AioContext *ctx, bool blocking);

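/*
 * Illustrative wait-loop sketch (not part of the original header; the
 * completion flag is hypothetical and error_abort comes from qapi/error.h):
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     bool done = false;
 *
 *     // ... submit work whose completion callback sets done = true ...
 *
 *     while (!done) {
 *         aio_poll(ctx, true);    // block until at least one event fires
 *     }
 */
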
/* Register a file descriptor and associated callbacks. Behaves very similarly
 * to qemu_set_fd_handler. Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque);

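/*
 * Illustrative sketch (not part of the original header; the fd and handler
 * names are hypothetical):
 *
 *     static void my_fd_read(void *opaque)
 *     {
 *         // fd is readable; consume the data
 *     }
 *
 *     // Start monitoring fd for readability in ctx:
 *     aio_set_fd_handler(ctx, fd, my_fd_read, NULL, NULL, NULL, opaque);
 *
 *     // Stop monitoring fd (passing all-NULL callbacks removes the handler):
 *     aio_set_fd_handler(ctx, fd, NULL, NULL, NULL, NULL, NULL);
 */
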
/* Register an event notifier and associated callbacks. Behaves very similarly
 * to event_notifier_set_handler. Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready);

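/*
 * Illustrative sketch (not part of the original header; the handler and
 * notifier names are hypothetical):
 *
 *     static void my_notifier_read(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         // handle the event
 *     }
 *
 *     EventNotifier notifier;
 *     event_notifier_init(&notifier, 0);
 *     aio_set_event_notifier(ctx, &notifier, my_notifier_read, NULL, NULL);
 *
 *     // Unregister by passing a NULL io_read handler:
 *     aio_set_event_notifier(ctx, &notifier, NULL, NULL, NULL);
 */
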
/*
 * Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier. Do nothing if the event notifier is
 * not registered.
 *
 * Note that if the io_poll_end() callback (or the entire notifier) is removed
 * during polling, it will not be called, so an io_poll_begin() is not
 * necessarily always followed by an io_poll_end().
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

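/*
 * Illustrative sketch (not part of the original header; the callback name
 * and the 1 ms delay are hypothetical):
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         // fires from ctx's event loop once the deadline passes
 *     }
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_NS,
 *                                  my_timer_cb, opaque);
 *     timer_mod(t, qemu_clock_get_ns(QEMU_CLOCK_REALTIME) + 1000000);
 *     timer_free(t);   // when no longer needed; also cancels a pending timer
 */
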
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active. In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, Coroutine *co);

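/*
 * Illustrative sketch (not part of the original header; the coroutine entry
 * point name is hypothetical, qemu_coroutine_create comes from
 * qemu/coroutine.h):
 *
 *     static void coroutine_fn my_co_entry(void *opaque)
 *     {
 *         // runs in the event loop of the AioContext it was scheduled on
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_entry, opaque);
 *     aio_co_schedule(ctx, co);   // ctx's event loop will enter the coroutine
 */
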
/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx. If the coroutine is already
 * running in new_ctx, do nothing.
 *
 * Note that this function cannot reschedule from iohandler_ctx to
 * qemu_aio_context.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context. The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext. If
 * called from the main thread or with the "big QEMU lock" taken it
 * will be the main loop AioContext.
 *
 * Note that the return value is never the main loop's iohandler_ctx; in that
 * case the main loop AioContext is returned instead.
 */
AioContext *qemu_get_current_aio_context(void);

void qemu_set_current_aio_context(AioContext *ctx);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

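/*
 * Illustrative sketch (not part of the original header; the values shown are
 * arbitrary and error_abort comes from qapi/error.h):
 *
 *     // Busy-poll for up to 32 microseconds before blocking, growing or
 *     // shrinking the window by a factor of 2; disable again with max_ns = 0.
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 */
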
/**
 * aio_context_set_aio_params:
 * @ctx: the aio context
 * @max_batch: maximum number of requests in a batch, 0 means that the
 *             engine will use its default
 */
void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch);

/**
 * aio_context_set_thread_pool_params:
 * @ctx: the aio context
 * @min: min number of threads to have readily available in the thread pool
 * @max: max number of threads the thread pool can contain
 */
void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp);
#endif