aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 1 | /* |
| 2 | * QEMU aio implementation |
| 3 | * |
| 4 | * Copyright IBM, Corp. 2008 |
| 5 | * |
| 6 | * Authors: |
| 7 | * Anthony Liguori <aliguori@us.ibm.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU GPL, version 2. See |
| 10 | * the COPYING file in the top-level directory. |
| 11 | * |
Paolo Bonzini | 6b620ca | 2012-01-13 17:44:23 +0100 | [diff] [blame] | 12 | * Contributions after 2012-01-13 are licensed under the terms of the |
| 13 | * GNU GPL, version 2 or (at your option) any later version. |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 14 | */ |
| 15 | |
| 16 | #include "qemu-common.h" |
| 17 | #include "block.h" |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 18 | #include "qemu-queue.h" |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 19 | #include "qemu_socket.h" |
| 20 | |
typedef struct AioHandler AioHandler;

/* The list of registered AIO handlers */
static QLIST_HEAD(, AioHandler) aio_handlers;

/* This is a simple lock used to protect the aio_handlers list.  Specifically,
 * it's used to ensure that no callbacks are removed while we're walking and
 * dispatching callbacks.
 */
static int walking_handlers;

struct AioHandler
{
    int fd;                     /* file descriptor being monitored */
    IOHandler *io_read;         /* invoked when fd is readable (may be NULL) */
    IOHandler *io_write;        /* invoked when fd is writable (may be NULL) */
    AioFlushHandler *io_flush;  /* returns nonzero while requests are pending
                                 * on this fd; 0 means "nothing to wait for" */
    int deleted;                /* removal deferred while a walk is in progress;
                                 * reaped by qemu_aio_wait() after the walk */
    void *opaque;               /* user data passed to all three callbacks */
    QLIST_ENTRY(AioHandler) node;
};
| 42 | |
| 43 | static AioHandler *find_aio_handler(int fd) |
| 44 | { |
| 45 | AioHandler *node; |
| 46 | |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 47 | QLIST_FOREACH(node, &aio_handlers, node) { |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 48 | if (node->fd == fd) |
Alexander Graf | 79d5ca5 | 2009-05-06 02:58:48 +0200 | [diff] [blame] | 49 | if (!node->deleted) |
| 50 | return node; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 51 | } |
| 52 | |
| 53 | return NULL; |
| 54 | } |
| 55 | |
| 56 | int qemu_aio_set_fd_handler(int fd, |
| 57 | IOHandler *io_read, |
| 58 | IOHandler *io_write, |
| 59 | AioFlushHandler *io_flush, |
| 60 | void *opaque) |
| 61 | { |
| 62 | AioHandler *node; |
| 63 | |
| 64 | node = find_aio_handler(fd); |
| 65 | |
| 66 | /* Are we deleting the fd handler? */ |
| 67 | if (!io_read && !io_write) { |
| 68 | if (node) { |
| 69 | /* If the lock is held, just mark the node as deleted */ |
| 70 | if (walking_handlers) |
| 71 | node->deleted = 1; |
| 72 | else { |
| 73 | /* Otherwise, delete it for real. We can't just mark it as |
| 74 | * deleted because deleted nodes are only cleaned up after |
| 75 | * releasing the walking_handlers lock. |
| 76 | */ |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 77 | QLIST_REMOVE(node, node); |
Anthony Liguori | 7267c09 | 2011-08-20 22:09:37 -0500 | [diff] [blame] | 78 | g_free(node); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 79 | } |
| 80 | } |
| 81 | } else { |
| 82 | if (node == NULL) { |
| 83 | /* Alloc and insert if it's not already there */ |
Anthony Liguori | 7267c09 | 2011-08-20 22:09:37 -0500 | [diff] [blame] | 84 | node = g_malloc0(sizeof(AioHandler)); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 85 | node->fd = fd; |
Blue Swirl | 72cf2d4 | 2009-09-12 07:36:22 +0000 | [diff] [blame] | 86 | QLIST_INSERT_HEAD(&aio_handlers, node, node); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 87 | } |
| 88 | /* Update handler with latest information */ |
| 89 | node->io_read = io_read; |
| 90 | node->io_write = io_write; |
| 91 | node->io_flush = io_flush; |
| 92 | node->opaque = opaque; |
| 93 | } |
| 94 | |
| 95 | qemu_set_fd_handler2(fd, NULL, io_read, io_write, opaque); |
| 96 | |
| 97 | return 0; |
| 98 | } |
| 99 | |
/* Drain all pending AIO: keep iterating the event loop for as long as
 * qemu_aio_wait() reports that it made progress.
 */
void qemu_aio_flush(void)
{
    while (qemu_aio_wait()) {
        /* qemu_aio_wait() did all the work; nothing to do here */
    }
}
| 104 | |
Paolo Bonzini | bcdc185 | 2012-04-12 14:00:55 +0200 | [diff] [blame] | 105 | bool qemu_aio_wait(void) |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 106 | { |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 107 | AioHandler *node; |
| 108 | fd_set rdfds, wrfds; |
| 109 | int max_fd = -1; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 110 | int ret; |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 111 | bool busy; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 112 | |
Kevin Wolf | 8febfa2 | 2009-10-22 17:54:36 +0200 | [diff] [blame] | 113 | /* |
| 114 | * If there are callbacks left that have been queued, we need to call then. |
Paolo Bonzini | bcdc185 | 2012-04-12 14:00:55 +0200 | [diff] [blame] | 115 | * Do not call select in this case, because it is possible that the caller |
| 116 | * does not need a complete flush (as is the case for qemu_aio_wait loops). |
Kevin Wolf | 8febfa2 | 2009-10-22 17:54:36 +0200 | [diff] [blame] | 117 | */ |
Paolo Bonzini | bafbd6a | 2012-04-12 14:00:54 +0200 | [diff] [blame] | 118 | if (qemu_bh_poll()) { |
Paolo Bonzini | bcdc185 | 2012-04-12 14:00:55 +0200 | [diff] [blame] | 119 | return true; |
Paolo Bonzini | bafbd6a | 2012-04-12 14:00:54 +0200 | [diff] [blame] | 120 | } |
Kevin Wolf | 8febfa2 | 2009-10-22 17:54:36 +0200 | [diff] [blame] | 121 | |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 122 | walking_handlers = 1; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 123 | |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 124 | FD_ZERO(&rdfds); |
| 125 | FD_ZERO(&wrfds); |
| 126 | |
| 127 | /* fill fd sets */ |
| 128 | busy = false; |
| 129 | QLIST_FOREACH(node, &aio_handlers, node) { |
| 130 | /* If there aren't pending AIO operations, don't invoke callbacks. |
| 131 | * Otherwise, if there are no AIO requests, qemu_aio_wait() would |
| 132 | * wait indefinitely. |
| 133 | */ |
| 134 | if (node->io_flush) { |
| 135 | if (node->io_flush(node->opaque) == 0) { |
| 136 | continue; |
| 137 | } |
| 138 | busy = true; |
| 139 | } |
| 140 | if (!node->deleted && node->io_read) { |
| 141 | FD_SET(node->fd, &rdfds); |
| 142 | max_fd = MAX(max_fd, node->fd + 1); |
| 143 | } |
| 144 | if (!node->deleted && node->io_write) { |
| 145 | FD_SET(node->fd, &wrfds); |
| 146 | max_fd = MAX(max_fd, node->fd + 1); |
| 147 | } |
| 148 | } |
| 149 | |
| 150 | walking_handlers = 0; |
| 151 | |
| 152 | /* No AIO operations? Get us out of here */ |
| 153 | if (!busy) { |
| 154 | return false; |
| 155 | } |
| 156 | |
| 157 | /* wait until next event */ |
| 158 | ret = select(max_fd, &rdfds, &wrfds, NULL, NULL); |
| 159 | |
| 160 | /* if we have any readable fds, dispatch event */ |
| 161 | if (ret > 0) { |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 162 | walking_handlers = 1; |
| 163 | |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 164 | /* we have to walk very carefully in case |
| 165 | * qemu_aio_set_fd_handler is called while we're walking */ |
| 166 | node = QLIST_FIRST(&aio_handlers); |
| 167 | while (node) { |
| 168 | AioHandler *tmp; |
aliguori | f71903d | 2008-10-12 21:19:57 +0000 | [diff] [blame] | 169 | |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 170 | if (!node->deleted && |
| 171 | FD_ISSET(node->fd, &rdfds) && |
| 172 | node->io_read) { |
| 173 | node->io_read(node->opaque); |
Paolo Bonzini | bcdc185 | 2012-04-12 14:00:55 +0200 | [diff] [blame] | 174 | } |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 175 | if (!node->deleted && |
| 176 | FD_ISSET(node->fd, &wrfds) && |
| 177 | node->io_write) { |
| 178 | node->io_write(node->opaque); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 179 | } |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 180 | |
| 181 | tmp = node; |
| 182 | node = QLIST_NEXT(node, node); |
| 183 | |
| 184 | if (tmp->deleted) { |
| 185 | QLIST_REMOVE(tmp, node); |
| 186 | g_free(tmp); |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 187 | } |
| 188 | } |
| 189 | |
| 190 | walking_handlers = 0; |
Paolo Bonzini | 9eb0bfc | 2012-04-12 14:00:56 +0200 | [diff] [blame] | 191 | } |
Paolo Bonzini | bcdc185 | 2012-04-12 14:00:55 +0200 | [diff] [blame] | 192 | |
| 193 | return true; |
aliguori | a76bab4 | 2008-09-22 19:17:18 +0000 | [diff] [blame] | 194 | } |