Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 1 | /* |
| 2 | * QEMU aio implementation |
| 3 | * |
| 4 | * Copyright IBM Corp., 2008 |
| 5 | * Copyright Red Hat Inc., 2012 |
| 6 | * |
| 7 | * Authors: |
| 8 | * Anthony Liguori <aliguori@us.ibm.com> |
| 9 | * Paolo Bonzini <pbonzini@redhat.com> |
| 10 | * |
| 11 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 12 | * the COPYING file in the top-level directory. |
| 13 | * |
| 14 | * Contributions after 2012-01-13 are licensed under the terms of the |
| 15 | * GNU GPL, version 2 or (at your option) any later version. |
| 16 | */ |
| 17 | |
Peter Maydell | d38ea87 | 2016-01-29 17:50:05 +0000 | [diff] [blame] | 18 | #include "qemu/osdep.h" |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 19 | #include "qemu-common.h" |
Paolo Bonzini | 737e150 | 2012-12-17 18:19:44 +0100 | [diff] [blame] | 20 | #include "block/block.h" |
Paolo Bonzini | 1de7afc | 2012-12-17 18:20:00 +0100 | [diff] [blame] | 21 | #include "qemu/queue.h" |
| 22 | #include "qemu/sockets.h" |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 23 | #include "qapi/error.h" |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 24 | #include "qemu/rcu_queue.h" |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 25 | |
/* One registered event source: either a socket (io_read/io_write) or an
 * EventNotifier (io_notify).  Nodes live on ctx->aio_handlers, an RCU-style
 * list protected by ctx->list_lock.
 */
struct AioHandler {
    EventNotifier *e;                /* notifier whose HANDLE we wait on */
    IOHandler *io_read;              /* socket read callback, or NULL */
    IOHandler *io_write;             /* socket write callback, or NULL */
    EventNotifierHandler *io_notify; /* notifier callback, or NULL */
    GPollFD pfd;                     /* fd/HANDLE plus events/revents bits */
    int deleted;                     /* marked for removal; freed only once
                                      * list_lock has no other holders */
    void *opaque;                    /* argument for io_read/io_write */
    bool is_external;                /* fed to aio_node_check() when polling */
    QLIST_ENTRY(AioHandler) node;    /* link in ctx->aio_handlers */
};
| 37 | |
/*
 * Register, update, or remove the read/write callbacks for a socket.
 *
 * @ctx: AioContext to attach the handler to
 * @fd: a SOCKET on Windows (see comment below)
 * @is_external: recorded on the node; consulted via aio_node_check()
 * @io_read: read callback, or NULL
 * @io_write: write callback, or NULL
 * @io_poll: unused here; AioContext polling is not implemented on Windows
 * @opaque: argument passed to io_read/io_write
 *
 * Passing NULL for both io_read and io_write removes the handler.  Removal
 * is deferred (node->deleted = 1) while any aio_poll() is walking the list,
 * i.e. while ctx->list_lock has a nonzero count.
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque)
{
    /* fd is a SOCKET in our case */
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    /* Find an existing, live node for this fd, if any */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            /* If aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        HANDLE event;
        long bitmask = 0;

        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);
        }

        /* NOTE(review): pfd.events is derived from the *previous*
         * io_read/io_write values (both NULL for a freshly allocated
         * node); the WSAEventSelect bitmask below uses the new values.
         * aio_prepare() tests io_read/io_write directly, so pfd.events
         * appears unused for sockets — confirm before relying on it.
         */
        node->pfd.events = 0;
        if (node->io_read) {
            node->pfd.events |= G_IO_IN;
        }
        if (node->io_write) {
            node->pfd.events |= G_IO_OUT;
        }

        node->e = &ctx->notifier;

        /* Update handler with latest information */
        node->opaque = opaque;
        node->io_read = io_read;
        node->io_write = io_write;
        node->is_external = is_external;

        if (io_read) {
            bitmask |= FD_READ | FD_ACCEPT | FD_CLOSE;
        }

        if (io_write) {
            bitmask |= FD_WRITE | FD_CONNECT;
        }

        /* Have network activity on the socket signal ctx->notifier's HANDLE,
         * which aio_poll() waits on.
         */
        event = event_notifier_get_handle(&ctx->notifier);
        WSAEventSelect(node->pfd.fd, event, bitmask);
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}
| 114 | |
/*
 * Set poll-mode begin/end callbacks for a file descriptor.
 * A no-op here: AioContext polling is not implemented on Windows
 * (see aio_context_set_poll_params below).
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    /* Not implemented */
}
| 121 | |
/*
 * Register, update, or remove the callback for an EventNotifier.
 *
 * @ctx: AioContext to attach the notifier to
 * @e: the notifier; its Win32 HANDLE is what aio_poll() waits on
 * @is_external: recorded on the node; consulted via aio_node_check()
 * @io_notify: callback invoked when the notifier fires, or NULL to remove
 * @io_poll: unused here; AioContext polling is not implemented on Windows
 *
 * Mirrors aio_set_fd_handler(): removal is deferred while an aio_poll()
 * holds a count on ctx->list_lock.  Unlike sockets, notifier nodes are
 * also registered with the GSource so glib main loops can poll them.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *e,
                            bool is_external,
                            EventNotifierHandler *io_notify,
                            AioPollFn *io_poll)
{
    AioHandler *node;

    qemu_lockcnt_lock(&ctx->list_lock);
    /* Find an existing, live node for this notifier, if any */
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->e == e && !node->deleted) {
            break;
        }
    }

    /* Are we deleting the fd handler? */
    if (!io_notify) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* aio_poll is in progress, just mark the node as deleted */
            if (qemu_lockcnt_count(&ctx->list_lock)) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the list_lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_new0(AioHandler, 1);
            node->e = e;
            /* Poll the notifier's HANDLE rather than a socket */
            node->pfd.fd = (uintptr_t)event_notifier_get_handle(e);
            node->pfd.events = G_IO_IN;
            node->is_external = is_external;
            QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_notify = io_notify;
    }

    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);
}
| 174 | |
/*
 * Set poll-mode begin/end callbacks for an event notifier.
 * A no-op here: AioContext polling is not implemented on Windows.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    /* Not implemented */
}
| 182 | |
/*
 * Check all registered sockets for readiness with a zero-timeout select()
 * and latch the result into each node's pfd.revents.  Returns true if any
 * socket became readable/writable, so the caller (aio_poll, or the glib
 * prepare phase) knows there is work to dispatch even before waiting on
 * the event HANDLEs.
 */
bool aio_prepare(AioContext *ctx)
{
    static struct timeval tv0;  /* zero-initialized => non-blocking select */
    AioHandler *node;
    bool have_select_revents = false;
    fd_set rfds, wfds;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    /* fill fd sets */
    FD_ZERO(&rfds);
    FD_ZERO(&wfds);
    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        if (node->io_read) {
            FD_SET ((SOCKET)node->pfd.fd, &rfds);
        }
        if (node->io_write) {
            FD_SET ((SOCKET)node->pfd.fd, &wfds);
        }
    }

    if (select(0, &rfds, &wfds, NULL, &tv0) > 0) {
        /* Record readiness; aio_dispatch_handlers() consumes revents */
        QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
            node->pfd.revents = 0;
            if (FD_ISSET(node->pfd.fd, &rfds)) {
                node->pfd.revents |= G_IO_IN;
                have_select_revents = true;
            }

            if (FD_ISSET(node->pfd.fd, &wfds)) {
                node->pfd.revents |= G_IO_OUT;
                have_select_revents = true;
            }
        }
    }

    qemu_lockcnt_dec(&ctx->list_lock);
    return have_select_revents;
}
| 226 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 227 | bool aio_pending(AioContext *ctx) |
| 228 | { |
| 229 | AioHandler *node; |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 230 | bool result = false; |
Paolo Bonzini | abf90d3 | 2017-01-12 19:07:56 +0100 | [diff] [blame] | 231 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 232 | /* |
Paolo Bonzini | 87f68d3 | 2014-07-07 15:18:02 +0200 | [diff] [blame] | 233 | * We have to walk very carefully in case aio_set_fd_handler is |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 234 | * called while we're walking. |
| 235 | */ |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 236 | qemu_lockcnt_inc(&ctx->list_lock); |
| 237 | QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { |
| 238 | if (node->pfd.revents && node->io_notify) { |
| 239 | result = true; |
| 240 | break; |
| 241 | } |
| 242 | |
| 243 | if ((node->pfd.revents & G_IO_IN) && node->io_read) { |
| 244 | result = true; |
| 245 | break; |
| 246 | } |
| 247 | if ((node->pfd.revents & G_IO_OUT) && node->io_write) { |
| 248 | result = true; |
| 249 | break; |
| 250 | } |
| 251 | } |
| 252 | |
| 253 | qemu_lockcnt_dec(&ctx->list_lock); |
| 254 | return result; |
| 255 | } |
| 256 | |
/*
 * Invoke the callbacks of ready handlers and reap deleted nodes.
 *
 * @event: the signaled HANDLE from WaitForMultipleObjects, or
 *         INVALID_HANDLE_VALUE to dispatch purely on latched revents.
 *
 * Notifier callbacks run when the node has revents pending or its
 * notifier HANDLE matches @event; socket callbacks run based on the
 * revents filled in by aio_prepare().  Returns true if real progress
 * was made (firing ctx->notifier itself does not count).
 *
 * Caller must hold an increment on ctx->list_lock (both callers in this
 * file do), which is what makes the deferred-deletion reaping at the
 * bottom safe.
 */
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;
    AioHandler *tmp;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        int revents = node->pfd.revents;

        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        if (node->deleted) {
            /* Free only if we can atomically drop to zero holders and take
             * the lock; otherwise another walker may still see this node and
             * it will be reaped on a later pass.
             */
            if (qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
                QLIST_REMOVE(node, node);
                g_free(node);
                qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
            }
        }
    }

    return progress;
}
| 315 | |
/*
 * Dispatch pending work without waiting: bottom halves, handlers with
 * latched revents, then expired timers.  Used by the GSource dispatch
 * path; aio_poll() has its own dispatch loop.
 */
void aio_dispatch(AioContext *ctx)
{
    /* Hold a list_lock count across bh + handler dispatch; required by
     * aio_dispatch_handlers() for safe deferred deletion.
     */
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    /* INVALID_HANDLE_VALUE: no signaled HANDLE, dispatch on revents only */
    aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE);
    qemu_lockcnt_dec(&ctx->list_lock);
    timerlistgroup_run_timers(&ctx->tlg);
}
| 324 | |
| 325 | bool aio_poll(AioContext *ctx, bool blocking) |
| 326 | { |
| 327 | AioHandler *node; |
| 328 | HANDLE events[MAXIMUM_WAIT_OBJECTS + 1]; |
Paolo Bonzini | eabc977 | 2015-07-21 16:07:51 +0200 | [diff] [blame] | 329 | bool progress, have_select_revents, first; |
Paolo Bonzini | a398dea | 2014-07-09 11:53:03 +0200 | [diff] [blame] | 330 | int count; |
| 331 | int timeout; |
| 332 | |
| 333 | progress = false; |
| 334 | |
Paolo Bonzini | 0a9dd16 | 2014-07-09 11:53:07 +0200 | [diff] [blame] | 335 | /* aio_notify can avoid the expensive event_notifier_set if |
| 336 | * everything (file descriptors, bottom halves, timers) will |
| 337 | * be re-evaluated before the next blocking poll(). This is |
| 338 | * already true when aio_poll is called with blocking == false; |
Paolo Bonzini | eabc977 | 2015-07-21 16:07:51 +0200 | [diff] [blame] | 339 | * if blocking == true, it is only true after poll() returns, |
| 340 | * so disable the optimization now. |
Paolo Bonzini | 0a9dd16 | 2014-07-09 11:53:07 +0200 | [diff] [blame] | 341 | */ |
Paolo Bonzini | eabc977 | 2015-07-21 16:07:51 +0200 | [diff] [blame] | 342 | if (blocking) { |
| 343 | atomic_add(&ctx->notify_me, 2); |
| 344 | } |
Paolo Bonzini | 0a9dd16 | 2014-07-09 11:53:07 +0200 | [diff] [blame] | 345 | |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 346 | qemu_lockcnt_inc(&ctx->list_lock); |
Paolo Bonzini | 6493c97 | 2015-07-21 16:07:50 +0200 | [diff] [blame] | 347 | have_select_revents = aio_prepare(ctx); |
| 348 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 349 | /* fill fd sets */ |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 350 | count = 0; |
Paolo Bonzini | b92d9a9 | 2017-01-12 19:07:58 +0100 | [diff] [blame] | 351 | QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) { |
Fam Zheng | c1e1e5f | 2015-10-23 11:08:08 +0800 | [diff] [blame] | 352 | if (!node->deleted && node->io_notify |
| 353 | && aio_node_check(ctx, node->is_external)) { |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 354 | events[count++] = event_notifier_get_handle(node->e); |
| 355 | } |
| 356 | } |
| 357 | |
Paolo Bonzini | 3672fa5 | 2014-07-09 11:53:04 +0200 | [diff] [blame] | 358 | first = true; |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 359 | |
Paolo Bonzini | 6493c97 | 2015-07-21 16:07:50 +0200 | [diff] [blame] | 360 | /* ctx->notifier is always registered. */ |
| 361 | assert(count > 0); |
| 362 | |
| 363 | /* Multiple iterations, all of them non-blocking except the first, |
| 364 | * may be necessary to process all pending events. After the first |
| 365 | * WaitForMultipleObjects call ctx->notify_me will be decremented. |
| 366 | */ |
| 367 | do { |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 368 | HANDLE event; |
Alex Bligh | 438e1f4 | 2013-08-21 16:02:53 +0100 | [diff] [blame] | 369 | int ret; |
| 370 | |
Paolo Bonzini | 6493c97 | 2015-07-21 16:07:50 +0200 | [diff] [blame] | 371 | timeout = blocking && !have_select_revents |
Paolo Bonzini | 845ca10 | 2014-07-09 11:53:01 +0200 | [diff] [blame] | 372 | ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0; |
Alex Bligh | 438e1f4 | 2013-08-21 16:02:53 +0100 | [diff] [blame] | 373 | ret = WaitForMultipleObjects(count, events, FALSE, timeout); |
Paolo Bonzini | eabc977 | 2015-07-21 16:07:51 +0200 | [diff] [blame] | 374 | if (blocking) { |
| 375 | assert(first); |
| 376 | atomic_sub(&ctx->notify_me, 2); |
| 377 | } |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 378 | |
Paolo Bonzini | 21a03d1 | 2015-07-21 16:07:52 +0200 | [diff] [blame] | 379 | if (first) { |
Paolo Bonzini | 05e514b | 2015-07-21 16:07:53 +0200 | [diff] [blame] | 380 | aio_notify_accept(ctx); |
Paolo Bonzini | 21a03d1 | 2015-07-21 16:07:52 +0200 | [diff] [blame] | 381 | progress |= aio_bh_poll(ctx); |
| 382 | first = false; |
Paolo Bonzini | 3672fa5 | 2014-07-09 11:53:04 +0200 | [diff] [blame] | 383 | } |
Paolo Bonzini | 3672fa5 | 2014-07-09 11:53:04 +0200 | [diff] [blame] | 384 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 385 | /* if we have any signaled events, dispatch event */ |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 386 | event = NULL; |
| 387 | if ((DWORD) (ret - WAIT_OBJECT_0) < count) { |
| 388 | event = events[ret - WAIT_OBJECT_0]; |
Paolo Bonzini | a90d411 | 2014-09-15 14:52:58 +0200 | [diff] [blame] | 389 | events[ret - WAIT_OBJECT_0] = events[--count]; |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 390 | } else if (!have_select_revents) { |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 391 | break; |
| 392 | } |
| 393 | |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 394 | have_select_revents = false; |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 395 | blocking = false; |
| 396 | |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 397 | progress |= aio_dispatch_handlers(ctx, event); |
Paolo Bonzini | 6493c97 | 2015-07-21 16:07:50 +0200 | [diff] [blame] | 398 | } while (count > 0); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 399 | |
Paolo Bonzini | bd45143 | 2017-02-13 14:52:34 +0100 | [diff] [blame] | 400 | qemu_lockcnt_dec(&ctx->list_lock); |
| 401 | |
Paolo Bonzini | e4c7e2d | 2014-07-09 11:53:05 +0200 | [diff] [blame] | 402 | progress |= timerlistgroup_run_timers(&ctx->tlg); |
Stefan Hajnoczi | 164a101 | 2013-04-11 16:56:50 +0200 | [diff] [blame] | 403 | return progress; |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 404 | } |
Fam Zheng | 37fcee5 | 2015-10-30 12:06:28 +0800 | [diff] [blame] | 405 | |
/* Backend-specific AioContext initialization: nothing needed on Windows. */
void aio_context_setup(AioContext *ctx)
{
}
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 409 | |
/* Backend-specific AioContext teardown: nothing needed on Windows. */
void aio_context_destroy(AioContext *ctx)
{
}
| 413 | |
Stefan Hajnoczi | 82a4118 | 2016-12-01 19:26:51 +0000 | [diff] [blame] | 414 | void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns, |
| 415 | int64_t grow, int64_t shrink, Error **errp) |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 416 | { |
Peter Xu | 90c558b | 2018-03-22 16:56:30 +0800 | [diff] [blame] | 417 | if (max_ns) { |
| 418 | error_setg(errp, "AioContext polling is not implemented on Windows"); |
| 419 | } |
Stefan Hajnoczi | 4a1cba3 | 2016-12-01 19:26:42 +0000 | [diff] [blame] | 420 | } |