/*
 * QEMU aio implementation
 *
 * Copyright IBM Corp., 2008
 * Copyright Red Hat Inc., 2012
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *  Paolo Bonzini     <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
| 17 | |
| 18 | #include "qemu-common.h" |
Paolo Bonzini | 737e150 | 2012-12-17 18:19:44 +0100 | [diff] [blame] | 19 | #include "block/block.h" |
Paolo Bonzini | 1de7afc | 2012-12-17 18:20:00 +0100 | [diff] [blame] | 20 | #include "qemu/queue.h" |
| 21 | #include "qemu/sockets.h" |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 22 | |
/* One registered event source of an AioContext: either a socket with
 * read/write callbacks, or an EventNotifier with an io_notify callback.
 */
struct AioHandler {
    EventNotifier *e;                /* notifier backing this handler */
    IOHandler *io_read;              /* socket readable callback, or NULL */
    IOHandler *io_write;             /* socket writable callback, or NULL */
    EventNotifierHandler *io_notify; /* callback for notifier handlers, or NULL */
    GPollFD pfd;                     /* fd/handle plus requested (events) and
                                      * ready (revents) condition masks */
    int deleted;                     /* removal deferred while list is walked */
    void *opaque;                    /* argument passed to io_read/io_write */
    QLIST_ENTRY(AioHandler) node;    /* link in AioContext's aio_handlers list */
};
| 33 | |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 34 | void aio_set_fd_handler(AioContext *ctx, |
| 35 | int fd, |
| 36 | IOHandler *io_read, |
| 37 | IOHandler *io_write, |
| 38 | void *opaque) |
| 39 | { |
| 40 | /* fd is a SOCKET in our case */ |
| 41 | AioHandler *node; |
| 42 | |
| 43 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
| 44 | if (node->pfd.fd == fd && !node->deleted) { |
| 45 | break; |
| 46 | } |
| 47 | } |
| 48 | |
| 49 | /* Are we deleting the fd handler? */ |
| 50 | if (!io_read && !io_write) { |
| 51 | if (node) { |
| 52 | /* If the lock is held, just mark the node as deleted */ |
| 53 | if (ctx->walking_handlers) { |
| 54 | node->deleted = 1; |
| 55 | node->pfd.revents = 0; |
| 56 | } else { |
| 57 | /* Otherwise, delete it for real. We can't just mark it as |
| 58 | * deleted because deleted nodes are only cleaned up after |
| 59 | * releasing the walking_handlers lock. |
| 60 | */ |
| 61 | QLIST_REMOVE(node, node); |
| 62 | g_free(node); |
| 63 | } |
| 64 | } |
| 65 | } else { |
| 66 | HANDLE event; |
| 67 | |
| 68 | if (node == NULL) { |
| 69 | /* Alloc and insert if it's not already there */ |
Markus Armbruster | 3ba235a | 2014-12-04 13:55:09 +0100 | [diff] [blame] | 70 | node = g_new0(AioHandler, 1); |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 71 | node->pfd.fd = fd; |
| 72 | QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); |
| 73 | } |
| 74 | |
| 75 | node->pfd.events = 0; |
| 76 | if (node->io_read) { |
| 77 | node->pfd.events |= G_IO_IN; |
| 78 | } |
| 79 | if (node->io_write) { |
| 80 | node->pfd.events |= G_IO_OUT; |
| 81 | } |
| 82 | |
| 83 | node->e = &ctx->notifier; |
| 84 | |
| 85 | /* Update handler with latest information */ |
| 86 | node->opaque = opaque; |
| 87 | node->io_read = io_read; |
| 88 | node->io_write = io_write; |
| 89 | |
| 90 | event = event_notifier_get_handle(&ctx->notifier); |
| 91 | WSAEventSelect(node->pfd.fd, event, |
| 92 | FD_READ | FD_ACCEPT | FD_CLOSE | |
| 93 | FD_CONNECT | FD_WRITE | FD_OOB); |
| 94 | } |
| 95 | |
| 96 | aio_notify(ctx); |
| 97 | } |
| 98 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 99 | void aio_set_event_notifier(AioContext *ctx, |
| 100 | EventNotifier *e, |
Stefan Hajnoczi | f2e5dca | 2013-04-11 17:26:25 +0200 | [diff] [blame] | 101 | EventNotifierHandler *io_notify) |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 102 | { |
| 103 | AioHandler *node; |
| 104 | |
| 105 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
| 106 | if (node->e == e && !node->deleted) { |
| 107 | break; |
| 108 | } |
| 109 | } |
| 110 | |
| 111 | /* Are we deleting the fd handler? */ |
| 112 | if (!io_notify) { |
| 113 | if (node) { |
Paolo Bonzini | e3713e0 | 2012-09-24 14:57:41 +0200 | [diff] [blame] | 114 | g_source_remove_poll(&ctx->source, &node->pfd); |
| 115 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 116 | /* If the lock is held, just mark the node as deleted */ |
| 117 | if (ctx->walking_handlers) { |
| 118 | node->deleted = 1; |
| 119 | node->pfd.revents = 0; |
| 120 | } else { |
| 121 | /* Otherwise, delete it for real. We can't just mark it as |
| 122 | * deleted because deleted nodes are only cleaned up after |
| 123 | * releasing the walking_handlers lock. |
| 124 | */ |
| 125 | QLIST_REMOVE(node, node); |
| 126 | g_free(node); |
| 127 | } |
| 128 | } |
| 129 | } else { |
| 130 | if (node == NULL) { |
| 131 | /* Alloc and insert if it's not already there */ |
Markus Armbruster | 3ba235a | 2014-12-04 13:55:09 +0100 | [diff] [blame] | 132 | node = g_new0(AioHandler, 1); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 133 | node->e = e; |
| 134 | node->pfd.fd = (uintptr_t)event_notifier_get_handle(e); |
| 135 | node->pfd.events = G_IO_IN; |
| 136 | QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node); |
Paolo Bonzini | e3713e0 | 2012-09-24 14:57:41 +0200 | [diff] [blame] | 137 | |
| 138 | g_source_add_poll(&ctx->source, &node->pfd); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 139 | } |
| 140 | /* Update handler with latest information */ |
| 141 | node->io_notify = io_notify; |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 142 | } |
Paolo Bonzini | 7ed2b24 | 2012-09-25 10:22:39 +0200 | [diff] [blame] | 143 | |
| 144 | aio_notify(ctx); |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 145 | } |
| 146 | |
Paolo Bonzini | a3462c6 | 2014-07-09 11:53:08 +0200 | [diff] [blame] | 147 | bool aio_prepare(AioContext *ctx) |
| 148 | { |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 149 | static struct timeval tv0; |
| 150 | AioHandler *node; |
| 151 | bool have_select_revents = false; |
| 152 | fd_set rfds, wfds; |
| 153 | |
| 154 | /* fill fd sets */ |
| 155 | FD_ZERO(&rfds); |
| 156 | FD_ZERO(&wfds); |
| 157 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
| 158 | if (node->io_read) { |
| 159 | FD_SET ((SOCKET)node->pfd.fd, &rfds); |
| 160 | } |
| 161 | if (node->io_write) { |
| 162 | FD_SET ((SOCKET)node->pfd.fd, &wfds); |
| 163 | } |
| 164 | } |
| 165 | |
| 166 | if (select(0, &rfds, &wfds, NULL, &tv0) > 0) { |
| 167 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
| 168 | node->pfd.revents = 0; |
| 169 | if (FD_ISSET(node->pfd.fd, &rfds)) { |
| 170 | node->pfd.revents |= G_IO_IN; |
| 171 | have_select_revents = true; |
| 172 | } |
| 173 | |
| 174 | if (FD_ISSET(node->pfd.fd, &wfds)) { |
| 175 | node->pfd.revents |= G_IO_OUT; |
| 176 | have_select_revents = true; |
| 177 | } |
| 178 | } |
| 179 | } |
| 180 | |
| 181 | return have_select_revents; |
Paolo Bonzini | a3462c6 | 2014-07-09 11:53:08 +0200 | [diff] [blame] | 182 | } |
| 183 | |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 184 | bool aio_pending(AioContext *ctx) |
| 185 | { |
| 186 | AioHandler *node; |
| 187 | |
| 188 | QLIST_FOREACH(node, &ctx->aio_handlers, node) { |
| 189 | if (node->pfd.revents && node->io_notify) { |
| 190 | return true; |
| 191 | } |
Paolo Bonzini | b493317 | 2014-07-09 11:53:10 +0200 | [diff] [blame] | 192 | |
| 193 | if ((node->pfd.revents & G_IO_IN) && node->io_read) { |
| 194 | return true; |
| 195 | } |
| 196 | if ((node->pfd.revents & G_IO_OUT) && node->io_write) { |
| 197 | return true; |
| 198 | } |
Paolo Bonzini | f42b220 | 2012-06-09 04:01:51 +0200 | [diff] [blame] | 199 | } |
| 200 | |
| 201 | return false; |
| 202 | } |
| 203 | |
/* Invoke the callbacks of every ready handler; @event is the signaled
 * wait handle (or INVALID_HANDLE_VALUE when called outside aio_poll).
 * Returns true if any callback made progress; wakeups of the context's
 * own notifier do not count.
 */
static bool aio_dispatch_handlers(AioContext *ctx, HANDLE event)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        /* Snapshot revents: the callbacks below may modify the node. */
        int revents = node->pfd.revents;

        /* While this count is nonzero, removals only mark nodes deleted,
         * so the current node cannot be freed under us.
         */
        ctx->walking_handlers++;

        /* Notifier-style handler: run it if its conditions are ready or
         * its wait handle is the one that was signaled.
         */
        if (!node->deleted &&
            (revents || event_notifier_get_handle(node->e) == event) &&
            node->io_notify) {
            node->pfd.revents = 0;
            node->io_notify(node->e);

            /* aio_notify() does not count as progress */
            if (node->e != &ctx->notifier) {
                progress = true;
            }
        }

        /* Socket-style handler: run the per-direction callbacks for the
         * conditions captured in the revents snapshot.
         */
        if (!node->deleted &&
            (node->io_read || node->io_write)) {
            node->pfd.revents = 0;
            if ((revents & G_IO_IN) && node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if ((revents & G_IO_OUT) && node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            /* if the next select() will return an event, we have progressed */
            if (event == event_notifier_get_handle(&ctx->notifier)) {
                WSANETWORKEVENTS ev;
                WSAEnumNetworkEvents(node->pfd.fd, event, &ev);
                if (ev.lNetworkEvents) {
                    progress = true;
                }
            }
        }

        /* Advance before possibly freeing the current node. */
        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        /* Reap nodes whose removal was deferred, but only once no walk
         * (including nested ones started by the callbacks) is active.
         */
        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    return progress;
}
| 267 | |
Paolo Bonzini | e4c7e2d | 2014-07-09 11:53:05 +0200 | [diff] [blame] | 268 | bool aio_dispatch(AioContext *ctx) |
Paolo Bonzini | a398dea | 2014-07-09 11:53:03 +0200 | [diff] [blame] | 269 | { |
| 270 | bool progress; |
| 271 | |
Paolo Bonzini | e4c7e2d | 2014-07-09 11:53:05 +0200 | [diff] [blame] | 272 | progress = aio_bh_poll(ctx); |
| 273 | progress |= aio_dispatch_handlers(ctx, INVALID_HANDLE_VALUE); |
Paolo Bonzini | d397ec99 | 2014-07-09 11:53:02 +0200 | [diff] [blame] | 274 | progress |= timerlistgroup_run_timers(&ctx->tlg); |
Paolo Bonzini | a398dea | 2014-07-09 11:53:03 +0200 | [diff] [blame] | 275 | return progress; |
| 276 | } |
| 277 | |
/* Run one iteration of the event loop for @ctx: poll sockets, optionally
 * block waiting for event notifiers, dispatch ready handlers and run
 * expired timers.  Returns true if any progress was made.
 */
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    HANDLE events[MAXIMUM_WAIT_OBJECTS + 1];
    bool was_dispatching, progress, have_select_revents, first;
    int count;
    int timeout;

    aio_context_acquire(ctx);
    /* aio_prepare() select()s the sockets; if any is already ready we
     * must not block, so its revents get serviced this iteration.
     */
    have_select_revents = aio_prepare(ctx);
    if (have_select_revents) {
        blocking = false;
    }

    /* Remember the nesting state so a nested event loop can restore it. */
    was_dispatching = ctx->dispatching;
    progress = false;

    /* aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns.
     *
     * If we're in a nested event loop, ctx->dispatching might be true.
     * In that case we can restore it just before returning, but we
     * have to clear it now.
     */
    aio_set_dispatching(ctx, !blocking);

    /* Hold the walk count while collecting handles so nodes are not
     * freed under us.
     */
    ctx->walking_handlers++;

    /* fill fd sets */
    count = 0;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (!node->deleted && node->io_notify) {
            events[count++] = event_notifier_get_handle(node->e);
        }
    }

    ctx->walking_handlers--;
    first = true;

    /* wait until next event */
    while (count > 0) {
        HANDLE event;
        int ret;

        timeout = blocking
            ? qemu_timeout_ns_to_ms(aio_compute_timeout(ctx)) : 0;
        /* Drop the context lock while we may sleep so other threads can
         * acquire it; a zero timeout never sleeps, so keep the lock.
         */
        if (timeout) {
            aio_context_release(ctx);
        }
        ret = WaitForMultipleObjects(count, events, FALSE, timeout);
        if (timeout) {
            aio_context_acquire(ctx);
        }
        aio_set_dispatching(ctx, true);

        /* Run bottom halves once per aio_poll call, after the first wait. */
        if (first && aio_bh_poll(ctx)) {
            progress = true;
        }
        first = false;

        /* if we have any signaled events, dispatch event */
        event = NULL;
        if ((DWORD) (ret - WAIT_OBJECT_0) < count) {
            event = events[ret - WAIT_OBJECT_0];
            /* Swap-remove the signaled handle so subsequent iterations
             * wait only on the remaining ones.
             */
            events[ret - WAIT_OBJECT_0] = events[--count];
        } else if (!have_select_revents) {
            /* Nothing signaled and no pending select() results: done. */
            break;
        }

        /* select() results (if any) are consumed by this dispatch pass. */
        have_select_revents = false;
        /* Only the first wait may block; later iterations just drain. */
        blocking = false;

        progress |= aio_dispatch_handlers(ctx, event);
    }

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    aio_set_dispatching(ctx, was_dispatching);
    aio_context_release(ctx);
    return progress;
}