/*
 * Data plane event loop
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2009-2017 QEMU contributors
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/graph-lock.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"
#include "qemu/rcu_queue.h"
#include "block/raw-aio.h"
#include "qemu/coroutine_int.h"
#include "qemu/coroutine-tls.h"
#include "sysemu/cpu-timers.h"
#include "trace.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

/* QEMUBH::flags values */
enum {
    /* Already enqueued and waiting for aio_bh_poll() */
    BH_PENDING   = (1 << 0),

    /* Invoke the callback */
    BH_SCHEDULED = (1 << 1),

    /* Delete without invoking callback */
    BH_DELETED   = (1 << 2),

    /* Delete after invoking callback */
    BH_ONESHOT   = (1 << 3),

    /* Schedule periodically when the event loop is idle */
    BH_IDLE      = (1 << 4),
};

struct QEMUBH {
    AioContext *ctx;
    const char *name;
    QEMUBHFunc *cb;
    void *opaque;
    QSLIST_ENTRY(QEMUBH) next;
    unsigned flags;
    MemReentrancyGuard *reentrancy_guard;
};

/* Called concurrently from any thread */
static void aio_bh_enqueue(QEMUBH *bh, unsigned new_flags)
{
    AioContext *ctx = bh->ctx;
    unsigned old_flags;

    /*
     * Synchronizes with atomic_fetch_and() in aio_bh_dequeue(), ensuring that
     * insertion starts after BH_PENDING is set.
     */
    old_flags = qatomic_fetch_or(&bh->flags, BH_PENDING | new_flags);

    if (!(old_flags & BH_PENDING)) {
        /*
         * At this point the bottom half becomes visible to aio_bh_poll().
         * This insertion thus synchronizes with QSLIST_MOVE_ATOMIC in
         * aio_bh_poll(), ensuring that:
         * 1. any writes needed by the callback are visible from the callback
         *    after aio_bh_dequeue() returns bh.
         * 2. ctx is loaded before the callback has a chance to execute and bh
         *    could be freed.
         */
        QSLIST_INSERT_HEAD_ATOMIC(&ctx->bh_list, bh, next);
    }

    aio_notify(ctx);
    /*
     * Workaround for record/replay: vCPU execution should be suspended when a
     * new BH is scheduled. This is needed to avoid guest timeouts caused by
     * excessively long vCPU execution slices.
     */
    icount_notify_exit();
}

/* Only called from aio_bh_poll() and aio_ctx_finalize() */
static QEMUBH *aio_bh_dequeue(BHList *head, unsigned *flags)
{
    QEMUBH *bh = QSLIST_FIRST_RCU(head);

    if (!bh) {
        return NULL;
    }

    QSLIST_REMOVE_HEAD(head, next);

    /*
     * Synchronizes with qatomic_fetch_or() in aio_bh_enqueue(), ensuring that
     * the removal finishes before BH_PENDING is reset.
     */
    *flags = qatomic_fetch_and(&bh->flags,
                               ~(BH_PENDING | BH_SCHEDULED | BH_IDLE));
    return bh;
}

void aio_bh_schedule_oneshot_full(AioContext *ctx, QEMUBHFunc *cb,
                                  void *opaque, const char *name)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
    };
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_ONESHOT);
}

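/*
 * Usage sketch (not code from this file; my_cb and its opaque argument are
 * hypothetical): the aio_bh_schedule_oneshot() macro in block/aio.h wraps
 * this function, passing the callback name as the BH name.  The callback
 * runs once in ctx's home thread and the QEMUBH then frees itself via
 * BH_ONESHOT:
 *
 *     static void my_cb(void *opaque)
 *     {
 *         ...
 *     }
 *
 *     aio_bh_schedule_oneshot(qemu_get_aio_context(), my_cb, opaque);
 */
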
QEMUBH *aio_bh_new_full(AioContext *ctx, QEMUBHFunc *cb, void *opaque,
                        const char *name, MemReentrancyGuard *reentrancy_guard)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
        .name = name,
        .reentrancy_guard = reentrancy_guard,
    };
    return bh;
}

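/*
 * Usage sketch (my_cb and my_state are hypothetical): long-lived bottom
 * halves are usually created through the aio_bh_new() / qemu_bh_new() macros,
 * which forward to aio_bh_new_full().  qemu_bh_schedule() may be called from
 * any thread; qemu_bh_delete() only marks the BH and the memory is freed
 * later by aio_bh_poll():
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, my_state);
 *     qemu_bh_schedule(bh);        // my_cb(my_state) runs in ctx's thread
 *     ...
 *     qemu_bh_delete(bh);          // freed by a later aio_bh_poll()
 */
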
void aio_bh_call(QEMUBH *bh)
{
    bool last_engaged_in_io = false;

    /* Make a copy of the guard-pointer as cb may free the bh */
    MemReentrancyGuard *reentrancy_guard = bh->reentrancy_guard;
    if (reentrancy_guard) {
        last_engaged_in_io = reentrancy_guard->engaged_in_io;
        if (reentrancy_guard->engaged_in_io) {
            trace_reentrant_aio(bh->ctx, bh->name);
        }
        reentrancy_guard->engaged_in_io = true;
    }

    bh->cb(bh->opaque);

    if (reentrancy_guard) {
        reentrancy_guard->engaged_in_io = last_engaged_in_io;
    }
}

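/*
 * aio_bh_poll() detaches ctx->bh_list into a local slice and appends the
 * slice to ctx->bh_slice_list before running callbacks.  Bottom halves that
 * are scheduled while callbacks run go back onto ctx->bh_list, so a callback
 * that reschedules itself is deferred to a later aio_bh_poll() rather than
 * re-run in the same slice; keeping the slices on a list lets nested
 * aio_bh_poll() calls (e.g. from a callback that polls) work correctly.
 */
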
/* Multiple invocations of aio_bh_poll() cannot run concurrently. */
int aio_bh_poll(AioContext *ctx)
{
    BHListSlice slice;
    BHListSlice *s;
    int ret = 0;

    /* Synchronizes with QSLIST_INSERT_HEAD_ATOMIC in aio_bh_enqueue(). */
    QSLIST_MOVE_ATOMIC(&slice.bh_list, &ctx->bh_list);

    /*
     * GCC13 [-Werror=dangling-pointer=] complains that the local variable
     * 'slice' is being stored in the global 'ctx->bh_slice_list' but the
     * list is emptied before this function returns.
     */
#if !defined(__clang__)
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wpragmas"
#pragma GCC diagnostic ignored "-Wdangling-pointer="
#endif
    QSIMPLEQ_INSERT_TAIL(&ctx->bh_slice_list, &slice, next);
#if !defined(__clang__)
#pragma GCC diagnostic pop
#endif

    while ((s = QSIMPLEQ_FIRST(&ctx->bh_slice_list))) {
        QEMUBH *bh;
        unsigned flags;

        bh = aio_bh_dequeue(&s->bh_list, &flags);
        if (!bh) {
            QSIMPLEQ_REMOVE_HEAD(&ctx->bh_slice_list, next);
            continue;
        }

        if ((flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            /* Idle BHs don't count as progress */
            if (!(flags & BH_IDLE)) {
                ret = 1;
            }
            aio_bh_call(bh);
        }
        if (flags & (BH_DELETED | BH_ONESHOT)) {
            g_free(bh);
        }
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED | BH_IDLE);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_SCHEDULED);
}

/*
 * This function is asynchronous: it only clears BH_SCHEDULED, so a callback
 * that aio_bh_poll() has already dequeued may still run once more.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    qatomic_and(&bh->flags, ~BH_SCHEDULED);
}

/*
 * This function is asynchronous: the bottom half performs the actual deletion
 * at the very end, when aio_bh_poll() (or aio_ctx_finalize()) dequeues it and
 * sees BH_DELETED.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    aio_bh_enqueue(bh, BH_DELETED);
}

static int64_t aio_compute_bh_timeout(BHList *head, int timeout)
{
    QEMUBH *bh;

    QSLIST_FOREACH_RCU(bh, head, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            if (bh->flags & BH_IDLE) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    return timeout;
}

int64_t
aio_compute_timeout(AioContext *ctx)
{
    BHListSlice *s;
    int64_t deadline;
    int timeout = -1;

    timeout = aio_compute_bh_timeout(&ctx->bh_list, timeout);
    if (timeout == 0) {
        return 0;
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        timeout = aio_compute_bh_timeout(&s->bh_list, timeout);
        if (timeout == 0) {
            return 0;
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

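/*
 * Worked example for aio_compute_timeout() above: with only an idle BH
 * pending the result is 10000000 ns, so the event loop wakes up within 10 ms;
 * any non-idle scheduled BH short-circuits the result to 0 (poll without
 * blocking); with no scheduled BHs the result is the soonest timer deadline
 * from ctx->tlg, or -1 (block indefinitely) when no timer is armed.
 */
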
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) | 1);

    /*
     * Write ctx->notify_me before computing the timeout
     * (reading bottom half flags, etc.).  Pairs with
     * smp_mb in aio_notify().
     */
    smp_mb();

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    BHListSlice *s;

    /* Finish computing the timeout before clearing the flag. */
    qatomic_store_release(&ctx->notify_me, qatomic_read(&ctx->notify_me) & ~1);
    aio_notify_accept(ctx);

    QSLIST_FOREACH_RCU(bh, &ctx->bh_list, next) {
        if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
            return true;
        }
    }

    QSIMPLEQ_FOREACH(s, &ctx->bh_slice_list, next) {
        QSLIST_FOREACH_RCU(bh, &s->bh_list, next) {
            if ((bh->flags & (BH_SCHEDULED | BH_DELETED)) == BH_SCHEDULED) {
                return true;
            }
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    unsigned flags;

    thread_pool_free(ctx->thread_pool);

#ifdef CONFIG_LINUX_AIO
    if (ctx->linux_aio) {
        laio_detach_aio_context(ctx->linux_aio, ctx);
        laio_cleanup(ctx->linux_aio);
        ctx->linux_aio = NULL;
    }
#endif

#ifdef CONFIG_LINUX_IO_URING
    if (ctx->linux_io_uring) {
        luring_detach_aio_context(ctx->linux_io_uring, ctx);
        luring_cleanup(ctx->linux_io_uring);
        ctx->linux_io_uring = NULL;
    }
#endif

    assert(QSLIST_EMPTY(&ctx->scheduled_coroutines));
    qemu_bh_delete(ctx->co_schedule_bh);

    /* There must be no aio_bh_poll() calls going on */
    assert(QSIMPLEQ_EMPTY(&ctx->bh_slice_list));

    while ((bh = aio_bh_dequeue(&ctx->bh_list, &flags))) {
        /*
         * qemu_bh_delete() must have been called on BHs in this AioContext. In
         * many cases memory leaks, hangs, or inconsistent state occur when a
         * BH is leaked because something still expects it to run.
         *
         * If you hit this, fix the lifecycle of the BH so that
         * qemu_bh_delete() and any associated cleanup is called before the
         * AioContext is finalized.
         */
        if (unlikely(!(flags & BH_DELETED))) {
            fprintf(stderr, "%s: BH '%s' leaked, aborting...\n",
                    __func__, bh->name);
            abort();
        }

        g_free(bh);
    }

    aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_rec_mutex_destroy(&ctx->lock);
    qemu_lockcnt_destroy(&ctx->list_lock);
    timerlistgroup_deinit(&ctx->tlg);
    unregister_aiocontext(ctx);
    aio_context_destroy(ctx);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    aio_context_use_g_source(ctx);
    g_source_ref(&ctx->source);
    return &ctx->source;
}

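/*
 * Example (sketch): callers that drive an AioContext from a glib main loop
 * attach the GSource returned above, roughly:
 *
 *     GSource *src = aio_get_g_source(ctx);
 *     g_source_attach(src, NULL);   // NULL means the default GMainContext
 *     g_source_unref(src);
 *
 * This is essentially how the main loop integrates qemu_aio_context; threads
 * that call aio_poll() directly do not need the GSource.
 */
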
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

#ifdef CONFIG_LINUX_AIO
LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp)
{
    if (!ctx->linux_aio) {
        ctx->linux_aio = laio_init(errp);
        if (ctx->linux_aio) {
            laio_attach_aio_context(ctx->linux_aio, ctx);
        }
    }
    return ctx->linux_aio;
}

LinuxAioState *aio_get_linux_aio(AioContext *ctx)
{
    assert(ctx->linux_aio);
    return ctx->linux_aio;
}
#endif

#ifdef CONFIG_LINUX_IO_URING
LuringState *aio_setup_linux_io_uring(AioContext *ctx, Error **errp)
{
    if (ctx->linux_io_uring) {
        return ctx->linux_io_uring;
    }

    ctx->linux_io_uring = luring_init(errp);
    if (!ctx->linux_io_uring) {
        return NULL;
    }

    luring_attach_aio_context(ctx->linux_io_uring, ctx);
    return ctx->linux_io_uring;
}

LuringState *aio_get_linux_io_uring(AioContext *ctx)
{
    assert(ctx->linux_io_uring);
    return ctx->linux_io_uring;
}
#endif

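/*
 * Wakeup protocol: a producer publishes its work first (e.g. the bh_list
 * insertion in aio_bh_enqueue()), then sets ctx->notified, and only writes to
 * the EventNotifier when ctx->notify_me says the event loop may be about to
 * block.  The event loop side sets notify_me before computing its poll
 * timeout (aio_ctx_prepare() or aio_poll()) and clears it again before
 * dispatching, with the pairing barriers documented inline below.
 */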
void aio_notify(AioContext *ctx)
{
    /*
     * Write e.g. ctx->bh_list before writing ctx->notified.  Pairs with
     * smp_mb() in aio_notify_accept().
     */
    smp_wmb();
    qatomic_set(&ctx->notified, true);

    /*
     * Write ctx->notified (and also ctx->bh_list) before reading ctx->notify_me.
     * Pairs with smp_mb() in aio_ctx_prepare or aio_poll.
     */
    smp_mb();
    if (qatomic_read(&ctx->notify_me)) {
        event_notifier_set(&ctx->notifier);
    }
}

void aio_notify_accept(AioContext *ctx)
{
    qatomic_set(&ctx->notified, false);

    /*
     * Order reads of ctx->notified (in aio_context_notifier_poll()) and the
     * above clearing of ctx->notified before reads of e.g. bh->flags.  Pairs
     * with smp_wmb() in aio_notify.
     */
    smp_mb();
}

static void aio_timerlist_notify(void *opaque, QEMUClockType type)
{
    aio_notify(opaque);
}

static void aio_context_notifier_cb(EventNotifier *e)
{
    AioContext *ctx = container_of(e, AioContext, notifier);

    event_notifier_test_and_clear(&ctx->notifier);
}

/* Returns true if aio_notify() was called (e.g. a BH was scheduled) */
static bool aio_context_notifier_poll(void *opaque)
{
    EventNotifier *e = opaque;
    AioContext *ctx = container_of(e, AioContext, notifier);

    /*
     * No need for load-acquire because we just want to kick the
     * event loop.  aio_notify_accept() takes care of synchronizing
     * the event loop with the producers.
     */
    return qatomic_read(&ctx->notified);
}

static void aio_context_notifier_poll_ready(EventNotifier *e)
{
    /* Do nothing, we just wanted to kick the event loop */
}

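/*
 * Bottom half that enters coroutines queued by aio_co_schedule().  The
 * lock-free scheduled_coroutines list is built by prepending, so it is
 * reversed into 'straight' first so that coroutines are entered in the order
 * in which they were scheduled.
 */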
static void co_schedule_bh_cb(void *opaque)
{
    AioContext *ctx = opaque;
    QSLIST_HEAD(, Coroutine) straight, reversed;

    QSLIST_MOVE_ATOMIC(&reversed, &ctx->scheduled_coroutines);
    QSLIST_INIT(&straight);

    while (!QSLIST_EMPTY(&reversed)) {
        Coroutine *co = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, co_scheduled_next);
        QSLIST_INSERT_HEAD(&straight, co, co_scheduled_next);
    }

    while (!QSLIST_EMPTY(&straight)) {
        Coroutine *co = QSLIST_FIRST(&straight);
        QSLIST_REMOVE_HEAD(&straight, co_scheduled_next);
        trace_aio_co_schedule_bh_cb(ctx, co);
        aio_context_acquire(ctx);

        /* Protected by write barrier in qemu_aio_coroutine_enter */
        qatomic_set(&co->scheduled, NULL);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    QSLIST_INIT(&ctx->bh_list);
    QSIMPLEQ_INIT(&ctx->bh_slice_list);
    aio_context_setup(ctx);

    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    qemu_lockcnt_init(&ctx->list_lock);

    ctx->co_schedule_bh = aio_bh_new(ctx, co_schedule_bh_cb, ctx);
    QSLIST_INIT(&ctx->scheduled_coroutines);

    aio_set_event_notifier(ctx, &ctx->notifier,
                           aio_context_notifier_cb,
                           aio_context_notifier_poll,
                           aio_context_notifier_poll_ready);
#ifdef CONFIG_LINUX_AIO
    ctx->linux_aio = NULL;
#endif

#ifdef CONFIG_LINUX_IO_URING
    ctx->linux_io_uring = NULL;
#endif

    ctx->thread_pool = NULL;
    qemu_rec_mutex_init(&ctx->lock);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->poll_ns = 0;
    ctx->poll_max_ns = 0;
    ctx->poll_grow = 0;
    ctx->poll_shrink = 0;

    ctx->aio_max_batch = 0;

    ctx->thread_pool_min = 0;
    ctx->thread_pool_max = THREAD_POOL_MAX_THREADS_DEFAULT;

    register_aiocontext(ctx);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

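/*
 * Usage sketch (the 'running' flag is hypothetical): a dedicated event-loop
 * thread creates its own context and services it, roughly the way iothread.c
 * does:
 *
 *     AioContext *ctx = aio_context_new(&error_abort);
 *     qemu_set_current_aio_context(ctx);
 *     while (running) {
 *         aio_poll(ctx, true);      // block until there is work
 *     }
 *     aio_context_unref(ctx);
 *
 * aio_poll() is declared in block/aio.h and implemented per platform in
 * util/aio-posix.c and util/aio-win32.c.
 */
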
void aio_co_schedule(AioContext *ctx, Coroutine *co)
{
    trace_aio_co_schedule(ctx, co);
    const char *scheduled = qatomic_cmpxchg(&co->scheduled, NULL,
                                            __func__);

    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    /* The coroutine might run and release the last ctx reference before we
     * invoke qemu_bh_schedule().  Take a reference to keep ctx alive until
     * we're done.
     */
    aio_context_ref(ctx);

    QSLIST_INSERT_HEAD_ATOMIC(&ctx->scheduled_coroutines,
                              co, co_scheduled_next);
    qemu_bh_schedule(ctx->co_schedule_bh);

    aio_context_unref(ctx);
}

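/*
 * Usage sketch (my_entry, data and target_ctx are hypothetical): hand a
 * coroutine over to another context's thread.  The coroutine must not be
 * runnable anywhere else at this point, which the co->scheduled check above
 * helps catch:
 *
 *     Coroutine *co = qemu_coroutine_create(my_entry, data);
 *     aio_co_schedule(target_ctx, co);
 *
 * The coroutine is then entered from target_ctx's home thread the next time
 * that context runs its co_schedule_bh.
 */
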
typedef struct AioCoRescheduleSelf {
    Coroutine *co;
    AioContext *new_ctx;
} AioCoRescheduleSelf;

static void aio_co_reschedule_self_bh(void *opaque)
{
    AioCoRescheduleSelf *data = opaque;
    aio_co_schedule(data->new_ctx, data->co);
}

void coroutine_fn aio_co_reschedule_self(AioContext *new_ctx)
{
    AioContext *old_ctx = qemu_get_current_aio_context();

    if (old_ctx != new_ctx) {
        AioCoRescheduleSelf data = {
            .co = qemu_coroutine_self(),
            .new_ctx = new_ctx,
        };
        /*
         * We can't directly schedule the coroutine in the target context
         * because this would be racy: The other thread could try to enter the
         * coroutine before it has yielded in this one.
         */
        aio_bh_schedule_oneshot(old_ctx, aio_co_reschedule_self_bh, &data);
        qemu_coroutine_yield();
    }
}

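/*
 * Usage sketch (my_co_fn is hypothetical): a coroutine that must continue in
 * the main loop's thread can move itself there and simply keep running:
 *
 *     static void coroutine_fn my_co_fn(void *opaque)
 *     {
 *         ...
 *         aio_co_reschedule_self(qemu_get_aio_context());
 *         // from here on the coroutine runs in the main AioContext
 *     }
 */
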
void aio_co_wake(Coroutine *co)
{
    AioContext *ctx;

    /* Read coroutine before co->ctx.  Matches smp_wmb in
     * qemu_coroutine_enter.
     */
    smp_read_barrier_depends();
    ctx = qatomic_read(&co->ctx);

    aio_co_enter(ctx, co);
}

void aio_co_enter(AioContext *ctx, Coroutine *co)
{
    if (ctx != qemu_get_current_aio_context()) {
        aio_co_schedule(ctx, co);
        return;
    }

    if (qemu_in_coroutine()) {
        Coroutine *self = qemu_coroutine_self();
        assert(self != co);
        QSIMPLEQ_INSERT_TAIL(&self->co_queue_wakeup, co, co_queue_next);
    } else {
        aio_context_acquire(ctx);
        qemu_aio_coroutine_enter(ctx, co);
        aio_context_release(ctx);
    }
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

void aio_context_acquire(AioContext *ctx)
{
    qemu_rec_mutex_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    qemu_rec_mutex_unlock(&ctx->lock);
}

QEMU_DEFINE_STATIC_CO_TLS(AioContext *, my_aiocontext)

AioContext *qemu_get_current_aio_context(void)
{
    AioContext *ctx = get_my_aiocontext();
    if (ctx) {
        return ctx;
    }
    if (qemu_mutex_iothread_locked()) {
        /* Possibly in a vCPU thread. */
        return qemu_get_aio_context();
    }
    return NULL;
}

void qemu_set_current_aio_context(AioContext *ctx)
{
    assert(!get_my_aiocontext());
    set_my_aiocontext(ctx);
}

void aio_context_set_thread_pool_params(AioContext *ctx, int64_t min,
                                        int64_t max, Error **errp)
{
    if (min > max || !max || min > INT_MAX || max > INT_MAX) {
        error_setg(errp, "bad thread-pool-min/thread-pool-max values");
        return;
    }

    ctx->thread_pool_min = min;
    ctx->thread_pool_max = max;

    if (ctx->thread_pool) {
        thread_pool_update_params(ctx->thread_pool, ctx);
    }
}
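
/*
 * Usage sketch: user-supplied thread-pool-min/thread-pool-max bounds are
 * validated and applied through the function above, e.g.:
 *
 *     Error *err = NULL;
 *     aio_context_set_thread_pool_params(ctx, 0, 64, &err);
 *     if (err) {
 *         error_report_err(err);    // from qemu/error-report.h
 *     }
 */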