/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#ifdef CONFIG_LINUX_IO_URING
#include <liburing.h>
#endif
#include "qemu/coroutine.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

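/*
 * Example (an illustrative sketch only; MyAIOCB, my_aiocb_info and
 * my_aio_submit are hypothetical names, not part of this header): a driver
 * embeds BlockAIOCB as the first field of its own AIOCB structure, describes
 * it with an AIOCBInfo and allocates instances with qemu_aio_get():
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;      // must be the first field
 *         int state;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     static BlockAIOCB *my_aio_submit(BlockDriverState *bs,
 *                                      BlockCompletionFunc *cb, void *opaque)
 *     {
 *         MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *         acb->state = 0;
 *         // ...kick off the request; on completion, invoke
 *         // acb->common.cb(acb->common.opaque, ret) and qemu_aio_unref(acb)
 *         return &acb->common;
 *     }
 */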
typedef struct AioHandler AioHandler;
typedef QLIST_HEAD(, AioHandler) AioHandlerList;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;
struct LuringState;

/* Is polling disabled? */
bool aio_poll_disabled(AioContext *ctx);

/* Callbacks for file descriptor monitoring implementations */
typedef struct {
    /*
     * update:
     * @ctx: the AioContext
     * @old_node: the existing handler or NULL if this file descriptor is being
     *            monitored for the first time
     * @new_node: the new handler or NULL if this file descriptor is being
     *            removed
     *
     * Add/remove/modify a monitored file descriptor.
     *
     * Called with ctx->list_lock acquired.
     */
    void (*update)(AioContext *ctx, AioHandler *old_node, AioHandler *new_node);

    /*
     * wait:
     * @ctx: the AioContext
     * @ready_list: list for handlers that become ready
     * @timeout: maximum duration to wait, in nanoseconds
     *
     * Wait for file descriptors to become ready and place them on ready_list.
     *
     * Called with ctx->list_lock incremented but not locked.
     *
     * Returns: number of ready file descriptors.
     */
    int (*wait)(AioContext *ctx, AioHandlerList *ready_list, int64_t timeout);

    /*
     * need_wait:
     * @ctx: the AioContext
     *
     * Tell aio_poll() when to stop userspace polling early because ->wait()
     * has fds ready.
     *
     * File descriptor monitoring implementations that cannot poll fd readiness
     * from userspace should use aio_poll_disabled() here.  This ensures that
     * file descriptors are not starved by handlers that frequently make
     * progress via userspace polling.
     *
     * Returns: true if ->wait() should be called, false otherwise.
     */
    bool (*need_wait)(AioContext *ctx);
} FDMonOps;

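/*
 * Sketch of how a monitoring backend plugs into FDMonOps (the my_fdmon_*
 * names are hypothetical; the real backends live in util/fdmon-*.c):
 *
 *     static void my_fdmon_update(AioContext *ctx, AioHandler *old_node,
 *                                 AioHandler *new_node)
 *     {
 *         // add, modify or remove the fd in the backend's own state
 *     }
 *
 *     static int my_fdmon_wait(AioContext *ctx, AioHandlerList *ready_list,
 *                              int64_t timeout)
 *     {
 *         // block for up to @timeout ns, move ready handlers onto
 *         // ready_list, and return how many became ready
 *         return 0;
 *     }
 *
 *     static const FDMonOps my_fdmon_ops = {
 *         .update = my_fdmon_update,
 *         .wait = my_fdmon_wait,
 *         .need_wait = aio_poll_disabled,  // no userspace fd polling
 *     };
 *
 * An AioContext selects a backend by pointing its fdmon_ops field (see
 * struct AioContext below) at such a table.
 */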
/*
 * Each aio_bh_poll() call carves off a slice of the BH list, so that newly
 * scheduled BHs are not processed until the next aio_bh_poll() call.  All
 * active aio_bh_poll() calls chain their slices together in a list, so that
 * nested aio_bh_poll() calls process all scheduled bottom halves.
 */
typedef QSLIST_HEAD(, QEMUBH) BHList;
typedef struct BHListSlice BHListSlice;
struct BHListSlice {
    BHList bh_list;
    QSIMPLEQ_ENTRY(BHListSlice) next;
};

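/*
 * Simplified sketch of the slice mechanism described above (the real
 * implementation lives in util/async.c; callback dispatch is elided):
 *
 *     int aio_bh_poll(AioContext *ctx)
 *     {
 *         BHListSlice slice;
 *
 *         // Atomically carve off the BHs scheduled so far...
 *         QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);
 *         QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
 *
 *         // ...then drain ctx->bh_slice_list, so that a nested call also
 *         // processes the slices carved off by its callers.
 *         ...
 *     }
 */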
typedef QSLIST_HEAD(, AioHandler) AioHandlerSList;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    AioHandlerList aio_handlers;

    /* The list of AIO handlers to be deleted.  Protected by ctx->list_lock. */
    AioHandlerList deleted_aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * only written from the AioContext home thread, or under the BQL in
     * the case of the main AioContext.  However, it is read from any
     * thread so it is still accessed with atomic primitives.
     *
     * If this field is 0, everything (file descriptors, bottom halves,
     * timers) will be re-evaluated before the next blocking poll() or
     * io_uring wait; therefore, the event_notifier_set call can be
     * skipped.  If it is non-zero, you may need to wake up a concurrent
     * aio_poll or the glib main event loop, making event_notifier_set
     * necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;
    /* A lock that protects concurrent QEMUBH and AioHandler adders and
     * deleters, and ensures that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Bottom Halves pending aio_bh_poll() processing */
    BHList bh_list;

    /* Chained BH list slices for each nested aio_bh_poll() call */
    QSIMPLEQ_HEAD(, BHListSlice) bh_slice_list;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /*
     * State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif
#ifdef CONFIG_LINUX_IO_URING
    /*
     * State for Linux io_uring.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LuringState *linux_io_uring;

    /* State for file descriptor monitoring using Linux io_uring */
    struct io_uring fdmon_io_uring;
    AioHandlerSList submit_list;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /*
     * List of handlers participating in userspace polling.  Protected by
     * ctx->list_lock.  Iterated and modified mostly by the event loop thread
     * from aio_poll() with ctx->list_lock incremented.  aio_set_fd_handler()
     * only touches the list to delete nodes if ctx->list_lock's count is zero.
     */
    AioHandlerList poll_aio_handlers;

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;

    const FDMonOps *fdmon_ops;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

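/*
 * Typical pattern (sketch) for a thread that shares an AioContext with
 * other threads and must not be interrupted mid-operation:
 *
 *     aio_context_acquire(ctx);
 *     ... operate on state bound to ctx, e.g. submit block I/O ...
 *     aio_context_release(ctx);
 */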
/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently with itself.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed once the
 * event loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

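/*
 * Sketch of the bottom half lifecycle (my_cb and my_data are hypothetical):
 *
 *     static void my_cb(void *opaque)
 *     {
 *         // runs once in the AioContext's event loop
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_data);
 *     qemu_bh_schedule(bh);    // my_cb runs on the next event loop iteration
 *     ...
 *     qemu_bh_delete(bh);      // cancels if still pending, then frees
 */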
/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure progress was made
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

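/*
 * Sketch of the canonical blocking pattern, in the style of
 * AIO_WAIT_WHILE() (done is a hypothetical completion flag set by some
 * callback):
 *
 *     while (!done) {
 *         aio_poll(ctx, true);  // blocks until at least one event fires
 *     }
 */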
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);

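/*
 * Example (sketch; my_read_cb and sockfd are hypothetical):
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         int fd = *(int *)opaque;
 *         // called from the event loop whenever fd is readable
 *     }
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb, NULL, NULL, &sockfd);
 *     ...
 *     // unregister by passing NULL callbacks:
 *     aio_set_fd_handler(ctx, sockfd, true, NULL, NULL, NULL, NULL);
 */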
/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);

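/*
 * Example (sketch; my_notifier_read is hypothetical): another thread can
 * kick the event loop through an EventNotifier:
 *
 *     static void my_notifier_read(EventNotifier *e)
 *     {
 *         event_notifier_test_and_clear(e);
 *         // handle the event
 *     }
 *
 *     event_notifier_init(&notifier, 0);
 *     aio_set_event_notifier(ctx, &notifier, false, my_notifier_read, NULL);
 *     // any thread: event_notifier_set(&notifier) wakes the loop
 */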
/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Setup the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/* Setup the LuringState bound to this AioContext */
struct LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp);

/* Return the LuringState bound to this AioContext */
struct LuringState *aio_get_linux_io_uring(AioContext *ctx);

/**
 * aio_timer_new_with_attrs:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer (with attributes) attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init or aio_timer_init_with_attrs.
 * Use that unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new_with_attrs(AioContext *ctx,
                                                  QEMUClockType type,
                                                  int scale, int attributes,
                                                  QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * See aio_timer_new_with_attrs for details.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_full(&ctx->tlg, type, scale, 0, cb, opaque);
}

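/*
 * Example (sketch; my_timer_cb and my_data are hypothetical): arm a
 * 100 ms one-shot timer on this context's QEMU_CLOCK_REALTIME timer list:
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_data);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */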
/**
 * aio_timer_init_with_attrs:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @attributes: 0, or one to multiple OR'ed QEMU_TIMER_ATTR_<id> values
 *              to assign
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer (with attributes) attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init_with_attrs(AioContext *ctx,
                                             QEMUTimer *ts, QEMUClockType type,
                                             int scale, int attributes,
                                             QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, attributes, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * See aio_timer_init_with_attrs for details.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_full(ts, &ctx->tlg, type, scale, 0, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    qatomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = qatomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}

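/*
 * Typical pairing (sketch), e.g. around a section that must not be
 * interrupted by external (guest-driven) event sources:
 *
 *     aio_disable_external(ctx);
 *     ... aio_poll() skips external handlers here ...
 *     aio_enable_external(ctx);
 */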
/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: whether the checked node is an external event source
 *
 * Check whether a node with the given is_external flag may be polled by @ctx
 * at this moment.
 *
 * Returns: true if polling the node is allowed.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !qatomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);

/**
 * aio_co_reschedule_self:
 * @new_ctx: the new context
 *
 * Move the currently running coroutine to new_ctx.  If the coroutine is
 * already running in new_ctx, do nothing.
 */
void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx);

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

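/*
 * Sketch (my_co_fn, my_data and iothread_ctx are hypothetical): start a
 * coroutine in another AioContext, then wake it after it has yielded:
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_data);
 *     aio_co_schedule(iothread_ctx, co);  // co runs in iothread_ctx
 *     ...
 *     // later, from any thread, once co has yielded:
 *     aio_co_wake(co);
 */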
/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/* Used internally, do not call outside AioContext code */
void aio_context_use_g_source(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);

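/*
 * Example (sketch): enable adaptive busy polling for up to 32 microseconds,
 * doubling or halving the polling window as polling succeeds or fails
 * (error_abort is from qapi/error.h):
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 */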
#endif