/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

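/*
 * One AioHandler tracks a single file descriptor registered with an
 * AioContext: the read/write callbacks to dispatch, the io_flush callback
 * used to decide whether the fd still has outstanding work, the GPollFD
 * handed to the GLib source, and a "deleted" flag so that removal can be
 * deferred while the handler list is being walked.
 */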
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

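/* Return the live (not yet deleted) handler registered for @fd, or NULL. */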
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

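/*
 * Register, update or remove the handlers for a file descriptor.
 *
 * Passing NULL for both io_read and io_write removes the handler; if the
 * handler list is currently being walked, the node is only marked as
 * deleted and freed later.  Otherwise an existing node is updated in
 * place, or a new node is allocated and attached to the context's
 * GSource.  aio_notify() is called at the end so that a blocked
 * aio_poll() picks up the change.
 */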
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioFlushHandler *io_flush,
                        void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP : 0);
        node->pfd.events |= (io_write ? G_IO_OUT : 0);
    }

    aio_notify(ctx);
}
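/*
 * Example (a sketch only; the callback and variable names below are
 * hypothetical, not part of this file):
 *
 *     static void my_read_cb(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         ... consume data from s->fd ...
 *     }
 *
 *     aio_set_fd_handler(ctx, s->fd, my_read_cb, NULL, NULL, s);
 *
 * and later, to unregister the fd:
 *
 *     aio_set_fd_handler(ctx, s->fd, NULL, NULL, NULL, NULL);
 */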

void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioFlushEventNotifierHandler *io_flush)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL,
                       (AioFlushHandler *)io_flush, notifier);
}

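/*
 * Return true if any registered handler has events pending that its
 * read or write callback would consume on the next dispatch.
 */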
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        /*
         * FIXME: right now we cannot get G_IO_HUP and G_IO_ERR because
         * main-loop.c is still select based (due to the slirp legacy).
         * If main-loop.c ever switches to poll, G_IO_ERR should be
         * tested too.  Dispatching G_IO_ERR to both handlers should be
         * okay, since handlers need to be ready for spurious wakeups.
         */
        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}

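/*
 * Run one iteration of the event loop for @ctx:
 *
 *  1. Run any scheduled bottom halves (aio_bh_poll).
 *  2. Dispatch handler callbacks for revents already collected by the
 *     GSource, deferring node deletion while the list is walked.
 *  3. Unless a callback already ran and @blocking is false, select() on
 *     the registered file descriptors, but only if at least one handler's
 *     io_flush callback reports outstanding requests; then dispatch the
 *     resulting callbacks.  A blocking poll uses a NULL timeout, a
 *     non-blocking one a zero timeout.
 *
 * Returns false only when there was nothing to do; otherwise (a callback
 * ran, or outstanding requests were waited for) it returns true.
 */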
bool aio_poll(AioContext *ctx, bool blocking)
{
    static struct timeval tv0;
    AioHandler *node;
    fd_set rdfds, wrfds;
    int max_fd = -1;
    int ret;
    bool busy, progress;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call select in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    /*
     * Then dispatch any pending callbacks from the GSource.
     *
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        /* See comment in aio_pending.  */
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            node->io_read(node->opaque);
            progress = true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }

    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    FD_ZERO(&rdfds);
    FD_ZERO(&wrfds);

    /* fill fd sets */
    busy = false;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->io_read) {
            FD_SET(node->pfd.fd, &rdfds);
            max_fd = MAX(max_fd, node->pfd.fd + 1);
        }
        if (!node->deleted && node->io_write) {
            FD_SET(node->pfd.fd, &wrfds);
            max_fd = MAX(max_fd, node->pfd.fd + 1);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event */
    ret = select(max_fd, &rdfds, &wrfds, NULL, blocking ? NULL : &tv0);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        /* we have to walk very carefully in case
         * qemu_aio_set_fd_handler is called while we're walking */
        node = QLIST_FIRST(&ctx->aio_handlers);
        while (node) {
            AioHandler *tmp;

            ctx->walking_handlers++;

            if (!node->deleted &&
                FD_ISSET(node->pfd.fd, &rdfds) &&
                node->io_read) {
                node->io_read(node->opaque);
                progress = true;
            }
            if (!node->deleted &&
                FD_ISSET(node->pfd.fd, &wrfds) &&
                node->io_write) {
                node->io_write(node->opaque);
                progress = true;
            }

            tmp = node;
            node = QLIST_NEXT(node, node);

            ctx->walking_handlers--;

            if (!ctx->walking_handlers && tmp->deleted) {
                QLIST_REMOVE(tmp, node);
                g_free(tmp);
            }
        }
    }

    assert(progress || busy);
    return true;
}