#include "qemu/osdep.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "block/block.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"

static AioContext *ctx;
static ThreadPool *pool;
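/* Number of outstanding requests; decremented as each one completes. */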
static int active;

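/* Per-request state shared by the worker function and its completion. */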
typedef struct {
    BlockAIOCB *aiocb;
    int n;
    int ret;
} WorkerTestData;

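/* Trivial worker: atomically increment the counter and return its old value. */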
static int worker_cb(void *opaque)
{
    WorkerTestData *data = opaque;
    return atomic_fetch_inc(&data->n);
}

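/* Slow worker: bump the counter, sleep for two seconds, bump it again.
 * A counter value of 1 means the job is still running; 2 means it finished.
 */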
static int long_cb(void *opaque)
{
    WorkerTestData *data = opaque;
    atomic_inc(&data->n);
    g_usleep(2000000);
    atomic_inc(&data->n);
    return 0;
}

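/* Completion callback: record the result, clear the AIOCB pointer and
 * account for one fewer in-flight request.
 */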
static void done_cb(void *opaque, int ret)
{
    WorkerTestData *data = opaque;
    g_assert(data->ret == -EINPROGRESS || data->ret == -ECANCELED);
    data->ret = ret;
    data->aiocb = NULL;

    /* Callbacks are serialized, so no need to use atomic ops. */
    active--;
}

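/* Fire-and-forget submission: no AIOCB, just poll until the worker has run. */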
static void test_submit(void)
{
    WorkerTestData data = { .n = 0 };
    thread_pool_submit(pool, worker_cb, &data);
    while (data.n == 0) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(data.n, ==, 1);
}

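/* AIO-style submission: completion is reported through done_cb. */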
static void test_submit_aio(void)
{
    WorkerTestData data = { .n = 0, .ret = -EINPROGRESS };
    data.aiocb = thread_pool_submit_aio(pool, worker_cb, &data,
                                        done_cb, &data);

    /* The callbacks are not called until after the first wait. */
    active = 1;
    g_assert_cmpint(data.ret, ==, -EINPROGRESS);
    while (data.ret == -EINPROGRESS) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(active, ==, 0);
    g_assert_cmpint(data.n, ==, 1);
    g_assert_cmpint(data.ret, ==, 0);
}

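/* Coroutine body: submits work through thread_pool_submit_co() and yields,
 * so control bounces back and forth with test_submit_co() below.
 */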
static void co_test_cb(void *opaque)
{
    WorkerTestData *data = opaque;

    active = 1;
    data->n = 0;
    data->ret = -EINPROGRESS;
    thread_pool_submit_co(pool, worker_cb, data);

    /* The test continues in test_submit_co, after qemu_coroutine_enter... */

    g_assert_cmpint(data->n, ==, 1);
    data->ret = 0;
    active--;

    /* The test continues in test_submit_co, after aio_poll... */
}

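/* Driver for co_test_cb: enter the coroutine, then poll until it finishes. */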
static void test_submit_co(void)
{
    WorkerTestData data;
    Coroutine *co = qemu_coroutine_create(co_test_cb, &data);

    qemu_coroutine_enter(co);

    /* Back here once the worker has started. */

    g_assert_cmpint(active, ==, 1);
    g_assert_cmpint(data.ret, ==, -EINPROGRESS);

    /* aio_poll will execute the rest of the coroutine. */

    while (data.ret == -EINPROGRESS) {
        aio_poll(ctx, true);
    }

    /* Back here after the coroutine has finished. */

    g_assert_cmpint(active, ==, 0);
    g_assert_cmpint(data.ret, ==, 0);
}

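/* Submit 100 requests at once and wait for every completion callback. */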
static void test_submit_many(void)
{
    WorkerTestData data[100];
    int i;

    /* Start more work items than there will be threads. */
    for (i = 0; i < 100; i++) {
        data[i].n = 0;
        data[i].ret = -EINPROGRESS;
        thread_pool_submit_aio(pool, worker_cb, &data[i], done_cb, &data[i]);
    }

    active = 100;
    while (active > 0) {
        aio_poll(ctx, true);
    }
    for (i = 0; i < 100; i++) {
        g_assert_cmpint(data[i].n, ==, 1);
        g_assert_cmpint(data[i].ret, ==, 0);
    }
}

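/* Common body for the cancellation tests; "sync" selects between
 * bdrv_aio_cancel() and bdrv_aio_cancel_async().
 */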
static void do_test_cancel(bool sync)
{
    WorkerTestData data[100];
    int num_canceled;
    int i;

    /* Start more work items than there will be threads, to ensure
     * the pool is full.
     */
    test_submit_many();

    /* Start long running jobs, to ensure we can cancel some. */
    for (i = 0; i < 100; i++) {
        data[i].n = 0;
        data[i].ret = -EINPROGRESS;
        data[i].aiocb = thread_pool_submit_aio(pool, long_cb, &data[i],
                                               done_cb, &data[i]);
    }

    /* Starting the threads may be left to a bottom half. Let it
     * run, but do not waste too much time...
     */
    active = 100;
    aio_notify(ctx);
    aio_poll(ctx, false);

    /* Wait some time for the threads to start, with some sanity
     * testing on the behavior of the scheduler...
     */
    g_assert_cmpint(active, ==, 100);
    g_usleep(1000000);
    g_assert_cmpint(active, >, 50);

    /* Cancel the jobs that haven't been started yet. */
    num_canceled = 0;
    for (i = 0; i < 100; i++) {
        if (atomic_cmpxchg(&data[i].n, 0, 3) == 0) {
            data[i].ret = -ECANCELED;
            if (sync) {
                bdrv_aio_cancel(data[i].aiocb);
            } else {
                bdrv_aio_cancel_async(data[i].aiocb);
            }
            num_canceled++;
        }
    }
    g_assert_cmpint(active, >, 0);
    g_assert_cmpint(num_canceled, <, 100);

    for (i = 0; i < 100; i++) {
        if (data[i].aiocb && data[i].n != 3) {
            if (sync) {
                /* Canceling the others will be a blocking operation. */
                bdrv_aio_cancel(data[i].aiocb);
            } else {
                bdrv_aio_cancel_async(data[i].aiocb);
            }
        }
    }

    /* Finish execution and execute any remaining callbacks. */
    while (active > 0) {
        aio_poll(ctx, true);
    }
    g_assert_cmpint(active, ==, 0);
    for (i = 0; i < 100; i++) {
        if (data[i].n == 3) {
            g_assert_cmpint(data[i].ret, ==, -ECANCELED);
            g_assert(data[i].aiocb == NULL);
        } else {
            g_assert_cmpint(data[i].n, ==, 2);
            g_assert(data[i].ret == 0 || data[i].ret == -ECANCELED);
            g_assert(data[i].aiocb == NULL);
        }
    }
}

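/* Run the cancellation test in both the blocking and the asynchronous mode. */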
static void test_cancel(void)
{
    do_test_cancel(true);
}

static void test_cancel_async(void)
{
    do_test_cancel(false);
}

int main(int argc, char **argv)
{
    int ret;

    qemu_init_main_loop(&error_abort);
    ctx = qemu_get_current_aio_context();
    pool = aio_get_thread_pool(ctx);

    g_test_init(&argc, &argv, NULL);
    g_test_add_func("/thread-pool/submit", test_submit);
    g_test_add_func("/thread-pool/submit-aio", test_submit_aio);
    g_test_add_func("/thread-pool/submit-co", test_submit_co);
    g_test_add_func("/thread-pool/submit-many", test_submit_many);
    g_test_add_func("/thread-pool/cancel", test_cancel);
    g_test_add_func("/thread-pool/cancel-async", test_cancel_async);

    ret = g_test_run();

    return ret;
}