/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori <aliguori@us.ibm.com>
 *  Paolo Bonzini <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/main-loop.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"
#include "qapi/error.h"
#include "qemu/rcu_queue.h"
#include "qemu/error-report.h"

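/*
 * One AioHandler is allocated per registered socket or EventNotifier.
 * For sockets, io_read/io_write are the user callbacks; for event
 * notifiers, io_notify is used instead.  Removal is lazy: while any
 * aio_poll() is walking the list, a node is only marked ->deleted and
 * freed later.
 */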
struct AioHandler {
    EventNotifier *e;
    IOHandler *io_read;
    IOHandler *io_write;
    EventNotifierHandler *io_notify;
    GPollFD pfd;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

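/*
 * Unlink @node from @ctx->aio_handlers, deferring the actual free if any
 * aio_poll() is currently walking the list (list_lock count != 0).
 * Called with list_lock held.
 */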
static void aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /*
     * If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    /* If aio_poll is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        node->deleted = 1;
        node->pfd.revents = 0;
    } else {
        /* Otherwise, delete it for real.  We can't just mark it as
         * deleted because deleted nodes are only cleaned up after
         * releasing the list_lock.
         */
        QLIST_REMOVE(node, node);
        g_free(node);
    }
}

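/*
 * Register, replace, or remove the read/write callbacks for a socket.
 * On Windows only sockets are supported; the CRT fd is translated to a
 * SOCKET handle and bound to the context's event notifier via
 * qemu_socket_select(), so WaitForMultipleObjects() can wake up aio_poll().
 *
 * A hypothetical caller (names illustrative only) might look like:
 *
 *     aio_set_fd_handler(ctx, sockfd, my_read_cb, NULL, NULL, NULL, state);
 *     ...
 *     aio_set_fd_handler(ctx, sockfd, NULL, NULL, NULL, NULL, NULL);
 *
 * where passing all-NULL callbacks removes the handler.
 */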
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque)
{
    AioHandler *old_node;
    AioHandler *node = NULL;
    SOCKET s;

    if (!fd_is_socket(fd)) {
        error_report("fd=%d is not a socket, AIO implementation is missing", fd);
        return;
    }

    s = _get_osfhandle(fd);

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(old_node, &ctx->aio_handlers, node) {
        if (old_node->pfd.fd == s && !old_node->deleted) {
            break;
        }
    }

    if (io_read || io_write) {
        HANDLE event;
        long bitmask = 0;

        /* Alloc and insert if it's not already there */
        node = g_new0(AioHandler, 1);
        node->pfd.fd = s;

        /* Test the new callbacks, not the zero-initialized node fields */
        node->pfd.events = 0;
        if (io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        event = event_notifier_get_handle(&ctx->notifier);
        qemu_socket_select(fd, event, bitmask, NULL);
    }
    if (old_node) {
        aio_remove_fd_handler(ctx, old_node);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

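/*
 * Register or remove @io_notify as the callback for @e, and add the
 * notifier's HANDLE to the GSource poll set so the glib main loop can
 * wait on it.  io_poll and io_poll_ready are accepted for symmetry with
 * the POSIX implementation, but polling is not implemented on Windows
 * (see aio_context_set_poll_params() below).
 */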
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            aio_remove_fd_handler(ctx, node);
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}

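/* Polling mode is a no-op on Windows; see aio_context_set_poll_params(). */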
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}

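/*
 * Called before blocking: do a zero-timeout select() over all registered
 * sockets and record readiness in pfd.revents.  Returns true if any socket
 * is already ready, in which case aio_poll() must not block.
 */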
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}

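/*
 * Returns true if some handler has work to do: either an event notifier
 * with pending revents, or a socket whose recorded revents match its
 * registered read/write callbacks.
 */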
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->pfd.revents && node->io_notify) {
            result = true;
            break;
        }

        if ((node->pfd.revents & G_IO_IN) && node->io_read) {
            result = true;
            break;
        }
        if ((node->pfd.revents & G_IO_OUT) && node->io_write) {
            result = true;
            break;
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return result;
}

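/*
 * Invoke the callbacks of every ready handler.  @event is the HANDLE that
 * WaitForMultipleObjects() reported as signaled (or INVALID_HANDLE_VALUE
 * when dispatching from the glib main loop).  Runs with list_lock
 * incremented; deleted nodes are freed here only when the lock can be
 * taken exclusively for a moment.
 */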
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}

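/*
 * Entry point used by the AioContext GSource: run bottom halves, dispatch
 * ready handlers, then expired timers.
 */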
void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}

bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS];
    bool progress, have_select_revents, first;
    unsigned count;
    int timeout;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    if (blocking) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before computing the timeout
         * (reading bottom half flags, etc.).  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();
    }

    qemu_lockcnt_inc(&ctx->list_lock);
    have_select_revents = aio_prepare(ctx);

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify) {
            assert(count < MAXIMUM_WAIT_OBJECTS);
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    first = true;

    /* ctx->notifier is always registered.  */
    assert(count > 0);

    /* Multiple iterations, all of them non-blocking except the first,
     * may be necessary to process all pending events.  After the first
     * WaitForMultipleObjects call ctx->notify_me will be decremented.
     */
    do {
        HANDLE event;
        int ret;

        timeout = blocking && !have_select_revents
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (blocking) {
            assert(first);
            qatomic_store_release(&ctx->notify_me,
                                  qatomic_read(&ctx->notify_me) - 2);
            aio_notify_accept(ctx);
        }

        if (first) {
            progress |= aio_bh_poll(ctx);
            first = false;
        }

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            break;
        }

        have_select_revents = false;
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    } while (count > 0);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);
    return progress;
}

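/*
 * The remaining hooks exist for parity with the POSIX implementation;
 * they have nothing to set up or tear down on Windows.
 */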
void aio_context_setup(AioContext *ctx)
{
}

void aio_context_destroy(AioContext *ctx)
{
}

void aio_context_use_g_source(AioContext *ctx)
{
}

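/*
 * Adaptive polling is a POSIX-only optimization, so any attempt to enable
 * it (a non-zero max_ns) is rejected with an error.
 */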
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    if (max_ns) {
        error_setg(errp, "AioContext polling is not implemented on Windows");
    }
}

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
}