/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

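/*
 * Example (an illustrative sketch, not part of this header's API): a driver
 * defines an AIOCBInfo for its AIOCB subtype and allocates requests with
 * qemu_aio_get().  The MyAIOCB type and my_aio_start() names below are
 * hypothetical.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;        // must be the first field
 *         int status;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     static BlockAIOCB *my_aio_start(BlockDriverState *bs,
 *                                     BlockCompletionFunc *cb, void *opaque)
 *     {
 *         MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *         acb->status = 0;
 *         // ... submit I/O; on completion, call
 *         // acb->common.cb(acb->common.opaque, ret) and then drop the
 *         // reference with qemu_aio_unref(acb)
 *         return &acb->common;
 *     }
 */
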
typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;

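/*
 * A minimal sketch of how a monitoring backend plugs into FDMonOps
 * (illustrative only; the fdmon_example_* names are hypothetical and not
 * part of QEMU):
 *
 *     static void fdmon_example_update(AioContext *ctx,
 *                                      AioHandler *old_node,
 *                                      AioHandler *new_node)
 *     {
 *         // register new_node's fd with the kernel, or deregister
 *         // old_node's fd when new_node is NULL
 *     }
 *
 *     static int fdmon_example_wait(AioContext *ctx,
 *                                   AioHandlerList *ready_list,
 *                                   int64_t timeout)
 *     {
 *         // block for up to @timeout ns, move handlers that became
 *         // ready onto @ready_list, and return how many there were
 *         return 0;
 *     }
 *
 *     static const FDMonOps fdmon_example_ops = {
 *         .update    = fdmon_example_update,
 *         .wait      = fdmon_example_wait,
 *         .need_wait = aio_poll_disabled,  // no userspace polling support
 *     };
 */
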
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access. */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects against concurrent QEMUBH and AioHandler
     * additions and deletions, and ensures that no callbacks are removed
     * while we're walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

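/*
 * Example (an illustrative sketch; my_bh_cb and my_state are hypothetical):
 * allocate a bottom half, schedule it, and eventually delete it.
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->counter++;     // runs in the AioContext's thread, ASAP
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, s);
 *     qemu_bh_schedule(bh);
 *     // ... later, when no longer needed:
 *     qemu_bh_delete(bh);
 *
 * For one-off callbacks, aio_bh_schedule_oneshot(ctx, my_bh_cb, s) avoids
 * the manual allocation and deletion.
 */
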
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently from
 * multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

384 *qemu_bh_delete: Cancel execution of a bottom half and free its resources.
385 *
386 * Deleting a bottom half frees the memory that was allocated for it by
387 * qemu_bh_new. It also implies canceling the bottom half if it was
388 * scheduled.
Liu Ping Fandcc772e2013-07-16 12:28:58 +0800389 * This func is async. The bottom half will do the delete action at the finial
390 * end.
Paolo Bonzinif627aab2012-10-29 23:45:23 +0100391 *
392 * @bh: The bottom half to be deleted.
393 */
394void qemu_bh_delete(QEMUBH *bh);
395
/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

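/*
 * Example (an illustrative sketch; the "done" flag is hypothetical and
 * would be set by a completion callback): create a context and drive it
 * until some piece of work completes.
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     bool done = false;
 *
 *     // ... register handlers or schedule work that sets done = true ...
 *
 *     while (!done) {
 *         aio_poll(ctx, true);    // block until at least one event fires
 *     }
 *     aio_context_unref(ctx);
 */
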
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);

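/*
 * Example (an illustrative sketch; my_fd_read and my_state are
 * hypothetical): monitor a file descriptor for readability, then
 * deregister it by passing NULL callbacks.
 *
 *     static void my_fd_read(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // read from s->fd and process the data
 *     }
 *
 *     aio_set_fd_handler(ctx, s->fd, true, my_fd_read, NULL, NULL, s);
 *     // ... later, stop monitoring:
 *     aio_set_fd_handler(ctx, s->fd, true, NULL, NULL, NULL, NULL);
 */
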
/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

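/*
 * Example (an illustrative sketch; my_timer_cb and my_state are
 * hypothetical): arm a millisecond-scale timer on the context's
 * QEMU_CLOCK_REALTIME timer list.
 *
 *     static void my_timer_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         // timer expired; possibly re-arm with timer_mod()
 *     }
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, s);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     // ... when finished:
 *     timer_del(t);
 *     timer_free(t);
 */
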
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment.  True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

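/*
 * Example (an illustrative sketch; my_co_fn, my_complete and my_state are
 * hypothetical): hand a coroutine to another context's event loop, and wake
 * a sleeping coroutine from a completion path.
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         qemu_coroutine_yield();      // wait for my_complete()
 *         // resumed here by aio_co_wake(), in the context it last ran in
 *     }
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, s);
 *     s->co = co;
 *     aio_co_schedule(s->ctx, co);     // enter it from s->ctx's event loop
 *
 *     static void my_complete(MyState *s)
 *     {
 *         aio_co_wake(s->co);
 *     }
 */
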
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

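/*
 * Example (an illustrative sketch): enable adaptive busy polling for up to
 * 32 microseconds, doubling the polling window when it pays off and
 * quartering it when it stops doing so.  The factors shown are arbitrary.
 *
 *     Error *local_err = NULL;
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 4, &local_err);
 *     if (local_err) {
 *         error_report_err(local_err);
 *     }
 */
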
#endif