/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu-common.h"
#include "block/block.h"
#include "qemu/queue.h"
#include "qemu/sockets.h"

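/* One file descriptor registered with an AioContext.  pfd holds the fd and
 * the G_IO_* events it is polled for, io_read/io_write are invoked when the
 * fd becomes readable/writable, io_flush reports whether the fd has
 * outstanding requests worth waiting for, deleted defers removal while the
 * handler list is being walked, and pollfds_idx records this handler's slot
 * in ctx->pollfds during an aio_poll() iteration.
 */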
struct AioHandler
{
    GPollFD pfd;
    IOHandler *io_read;
    IOHandler *io_write;
    AioFlushHandler *io_flush;
    int deleted;
    int pollfds_idx;
    void *opaque;
    QLIST_ENTRY(AioHandler) node;
};

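/* Return the handler registered for fd in ctx, ignoring handlers that are
 * already marked as deleted, or NULL if none is found.
 */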
static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd && !node->deleted) {
            return node;
        }
    }

    return NULL;
}

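/* Register, update or remove the handler for fd.  Passing NULL for both
 * io_read and io_write removes an existing handler: it is freed immediately
 * unless the handler list is being walked, in which case it is only marked
 * as deleted and reaped later by aio_dispatch().  Otherwise the handler is
 * created on first use, added to the AioContext's GSource, and its callbacks
 * and poll events are refreshed.  aio_notify() is called so that a blocked
 * aio_poll() wakes up and sees the change.
 */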
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioFlushHandler *io_flush,
                        void *opaque)
{
    AioHandler *node;

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write) {
        if (node) {
            g_source_remove_poll(&ctx->source, &node->pfd);

            /* If the lock is held, just mark the node as deleted */
            if (ctx->walking_handlers) {
                node->deleted = 1;
                node->pfd.revents = 0;
            } else {
                /* Otherwise, delete it for real.  We can't just mark it as
                 * deleted because deleted nodes are only cleaned up after
                 * releasing the walking_handlers lock.
                 */
                QLIST_REMOVE(node, node);
                g_free(node);
            }
        }
    } else {
        if (node == NULL) {
            /* Alloc and insert if it's not already there */
            node = g_malloc0(sizeof(AioHandler));
            node->pfd.fd = fd;
            QLIST_INSERT_HEAD(&ctx->aio_handlers, node, node);

            g_source_add_poll(&ctx->source, &node->pfd);
        }
        /* Update handler with latest information */
        node->io_read = io_read;
        node->io_write = io_write;
        node->io_flush = io_flush;
        node->opaque = opaque;
        node->pollfds_idx = -1;

        node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);
    }

    aio_notify(ctx);
}

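/* Convenience wrapper around aio_set_fd_handler() for an EventNotifier:
 * io_read is installed as the read handler for the notifier's file
 * descriptor and the notifier itself is passed as the opaque pointer.
 */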
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            EventNotifierHandler *io_read,
                            AioFlushEventNotifierHandler *io_flush)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier),
                       (IOHandler *)io_read, NULL,
                       (AioFlushHandler *)io_flush, notifier);
}

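/* Return true if any registered handler has received events that its
 * callbacks would act on, i.e. a call to aio_dispatch() would make progress
 * without polling again.
 */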
bool aio_pending(AioContext *ctx)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        int revents;

        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read) {
            return true;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write) {
            return true;
        }
    }

    return false;
}

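/* Walk the handler list and invoke the read/write callbacks of every
 * handler whose revents match the events it subscribed to, clearing revents
 * as we go.  Handlers marked as deleted are freed once no one else is
 * walking the list.  Returns true if at least one callback ran.
 */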
static bool aio_dispatch(AioContext *ctx)
{
    AioHandler *node;
    bool progress = false;

    /*
     * We have to walk very carefully in case qemu_aio_set_fd_handler is
     * called while we're walking.
     */
    node = QLIST_FIRST(&ctx->aio_handlers);
    while (node) {
        AioHandler *tmp;
        int revents;

        ctx->walking_handlers++;

        revents = node->pfd.revents & node->pfd.events;
        node->pfd.revents = 0;

        if (!node->deleted &&
            (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
            node->io_read) {
            node->io_read(node->opaque);
            progress = true;
        }
        if (!node->deleted &&
            (revents & (G_IO_OUT | G_IO_ERR)) &&
            node->io_write) {
            node->io_write(node->opaque);
            progress = true;
        }

        tmp = node;
        node = QLIST_NEXT(node, node);

        ctx->walking_handlers--;

        if (!ctx->walking_handlers && tmp->deleted) {
            QLIST_REMOVE(tmp, node);
            g_free(tmp);
        }
    }
    return progress;
}

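/* Run one iteration of the event loop for ctx: flush scheduled bottom
 * halves, dispatch handlers that already have pending events and, unless
 * that made progress in non-blocking mode, g_poll() the registered
 * descriptors and dispatch the results.  Handlers whose io_flush callback
 * reports no outstanding requests are not polled, and if no handler is busy
 * the function returns early instead of blocking.  Returns true if callbacks
 * made progress or busy handlers were polled.
 */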
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandler *node;
    int ret;
    bool busy, progress;

    progress = false;

    /*
     * If there are callbacks left that have been queued, we need to call them.
     * Do not call g_poll in this case, because it is possible that the caller
     * does not need a complete flush (as is the case for qemu_aio_wait loops).
     */
    if (aio_bh_poll(ctx)) {
        blocking = false;
        progress = true;
    }

    if (aio_dispatch(ctx)) {
        progress = true;
    }

    if (progress && !blocking) {
        return true;
    }

    ctx->walking_handlers++;

    g_array_set_size(ctx->pollfds, 0);

    /* fill pollfds */
    busy = false;
    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        node->pollfds_idx = -1;

        /* If there aren't pending AIO operations, don't invoke callbacks.
         * Otherwise, if there are no AIO requests, qemu_aio_wait() would
         * wait indefinitely.
         */
        if (!node->deleted && node->io_flush) {
            if (node->io_flush(node->opaque) == 0) {
                continue;
            }
            busy = true;
        }
        if (!node->deleted && node->pfd.events) {
            GPollFD pfd = {
                .fd = node->pfd.fd,
                .events = node->pfd.events,
            };
            node->pollfds_idx = ctx->pollfds->len;
            g_array_append_val(ctx->pollfds, pfd);
        }
    }

    ctx->walking_handlers--;

    /* No AIO operations?  Get us out of here */
    if (!busy) {
        return progress;
    }

    /* wait until next event */
    ret = g_poll((GPollFD *)ctx->pollfds->data,
                 ctx->pollfds->len,
                 blocking ? -1 : 0);

    /* if we have any readable fds, dispatch event */
    if (ret > 0) {
        QLIST_FOREACH(node, &ctx->aio_handlers, node) {
            if (node->pollfds_idx != -1) {
                GPollFD *pfd = &g_array_index(ctx->pollfds, GPollFD,
                                              node->pollfds_idx);
                node->pfd.revents = pfd->revents;
            }
        }
        if (aio_dispatch(ctx)) {
            progress = true;
        }
    }

    assert(progress || busy);
    return true;
}