Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 1 | /* |
| 2 | * Coroutine tests |
| 3 | * |
| 4 | * Copyright IBM, Corp. 2011 |
| 5 | * |
| 6 | * Authors: |
| 7 | * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU LGPL, version 2 or later. |
| 10 | * See the COPYING.LIB file in the top-level directory. |
| 11 | * |
| 12 | */ |
| 13 | |
Peter Maydell | 681c28a | 2016-02-08 18:08:51 +0000 | [diff] [blame] | 14 | #include "qemu/osdep.h" |
Daniel P. Berrange | 10817bf | 2015-09-01 14:48:02 +0100 | [diff] [blame] | 15 | #include "qemu/coroutine.h" |
| 16 | #include "qemu/coroutine_int.h" |
Paolo Bonzini | e70372f | 2018-02-03 10:39:32 -0500 | [diff] [blame] | 17 | #include "qemu/lockable.h" |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 18 | |
| 19 | /* |
| 20 | * Check that qemu_in_coroutine() works |
| 21 | */ |
| 22 | |
| 23 | static void coroutine_fn verify_in_coroutine(void *opaque) |
| 24 | { |
| 25 | g_assert(qemu_in_coroutine()); |
| 26 | } |
| 27 | |
| 28 | static void test_in_coroutine(void) |
| 29 | { |
| 30 | Coroutine *coroutine; |
| 31 | |
| 32 | g_assert(!qemu_in_coroutine()); |
| 33 | |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 34 | coroutine = qemu_coroutine_create(verify_in_coroutine, NULL); |
| 35 | qemu_coroutine_enter(coroutine); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 36 | } |
| 37 | |
| 38 | /* |
| 39 | * Check that qemu_coroutine_self() works |
| 40 | */ |
| 41 | |
| 42 | static void coroutine_fn verify_self(void *opaque) |
| 43 | { |
Paolo Bonzini | 7e70cdb | 2016-07-04 19:10:00 +0200 | [diff] [blame] | 44 | Coroutine **p_co = opaque; |
| 45 | g_assert(qemu_coroutine_self() == *p_co); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 46 | } |
| 47 | |
| 48 | static void test_self(void) |
| 49 | { |
| 50 | Coroutine *coroutine; |
| 51 | |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 52 | coroutine = qemu_coroutine_create(verify_self, &coroutine); |
| 53 | qemu_coroutine_enter(coroutine); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 54 | } |
| 55 | |
| 56 | /* |
Stefan Hajnoczi | afe16f3 | 2016-09-27 16:18:35 +0100 | [diff] [blame] | 57 | * Check that qemu_coroutine_entered() works |
| 58 | */ |
| 59 | |
| 60 | static void coroutine_fn verify_entered_step_2(void *opaque) |
| 61 | { |
| 62 | Coroutine *caller = (Coroutine *)opaque; |
| 63 | |
| 64 | g_assert(qemu_coroutine_entered(caller)); |
| 65 | g_assert(qemu_coroutine_entered(qemu_coroutine_self())); |
| 66 | qemu_coroutine_yield(); |
| 67 | |
| 68 | /* Once more to check it still works after yielding */ |
| 69 | g_assert(qemu_coroutine_entered(caller)); |
| 70 | g_assert(qemu_coroutine_entered(qemu_coroutine_self())); |
Stefan Hajnoczi | afe16f3 | 2016-09-27 16:18:35 +0100 | [diff] [blame] | 71 | } |
| 72 | |
| 73 | static void coroutine_fn verify_entered_step_1(void *opaque) |
| 74 | { |
| 75 | Coroutine *self = qemu_coroutine_self(); |
| 76 | Coroutine *coroutine; |
| 77 | |
| 78 | g_assert(qemu_coroutine_entered(self)); |
| 79 | |
| 80 | coroutine = qemu_coroutine_create(verify_entered_step_2, self); |
| 81 | g_assert(!qemu_coroutine_entered(coroutine)); |
| 82 | qemu_coroutine_enter(coroutine); |
| 83 | g_assert(!qemu_coroutine_entered(coroutine)); |
| 84 | qemu_coroutine_enter(coroutine); |
| 85 | } |
| 86 | |
| 87 | static void test_entered(void) |
| 88 | { |
| 89 | Coroutine *coroutine; |
| 90 | |
| 91 | coroutine = qemu_coroutine_create(verify_entered_step_1, NULL); |
| 92 | g_assert(!qemu_coroutine_entered(coroutine)); |
| 93 | qemu_coroutine_enter(coroutine); |
| 94 | } |
| 95 | |
| 96 | /* |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 97 | * Check that coroutines may nest multiple levels |
| 98 | */ |
| 99 | |
typedef struct {
    unsigned int n_enter;  /* number of coroutine entries performed so far */
    unsigned int n_return; /* number of coroutine returns performed so far */
    unsigned int max;      /* nesting depth to reach before unwinding */
} NestData;
| 105 | |
| 106 | static void coroutine_fn nest(void *opaque) |
| 107 | { |
| 108 | NestData *nd = opaque; |
| 109 | |
| 110 | nd->n_enter++; |
| 111 | |
| 112 | if (nd->n_enter < nd->max) { |
| 113 | Coroutine *child; |
| 114 | |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 115 | child = qemu_coroutine_create(nest, nd); |
| 116 | qemu_coroutine_enter(child); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 117 | } |
| 118 | |
| 119 | nd->n_return++; |
| 120 | } |
| 121 | |
| 122 | static void test_nesting(void) |
| 123 | { |
| 124 | Coroutine *root; |
| 125 | NestData nd = { |
| 126 | .n_enter = 0, |
| 127 | .n_return = 0, |
| 128 | .max = 128, |
| 129 | }; |
| 130 | |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 131 | root = qemu_coroutine_create(nest, &nd); |
| 132 | qemu_coroutine_enter(root); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 133 | |
| 134 | /* Must enter and return from max nesting level */ |
| 135 | g_assert_cmpint(nd.n_enter, ==, nd.max); |
| 136 | g_assert_cmpint(nd.n_return, ==, nd.max); |
| 137 | } |
| 138 | |
| 139 | /* |
| 140 | * Check that yield/enter transfer control correctly |
| 141 | */ |
| 142 | |
| 143 | static void coroutine_fn yield_5_times(void *opaque) |
| 144 | { |
| 145 | bool *done = opaque; |
| 146 | int i; |
| 147 | |
| 148 | for (i = 0; i < 5; i++) { |
| 149 | qemu_coroutine_yield(); |
| 150 | } |
| 151 | *done = true; |
| 152 | } |
| 153 | |
| 154 | static void test_yield(void) |
| 155 | { |
| 156 | Coroutine *coroutine; |
| 157 | bool done = false; |
| 158 | int i = -1; /* one extra time to return from coroutine */ |
| 159 | |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 160 | coroutine = qemu_coroutine_create(yield_5_times, &done); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 161 | while (!done) { |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 162 | qemu_coroutine_enter(coroutine); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 163 | i++; |
| 164 | } |
| 165 | g_assert_cmpint(i, ==, 5); /* coroutine must yield 5 times */ |
| 166 | } |
| 167 | |
Stefan Hajnoczi | 7c2eed3 | 2015-02-10 11:15:59 +0100 | [diff] [blame] | 168 | static void coroutine_fn c2_fn(void *opaque) |
| 169 | { |
| 170 | qemu_coroutine_yield(); |
| 171 | } |
| 172 | |
| 173 | static void coroutine_fn c1_fn(void *opaque) |
| 174 | { |
| 175 | Coroutine *c2 = opaque; |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 176 | qemu_coroutine_enter(c2); |
Stefan Hajnoczi | 7c2eed3 | 2015-02-10 11:15:59 +0100 | [diff] [blame] | 177 | } |
| 178 | |
/*
 * Verify that terminating coroutine c1 is not accessed again once it has
 * finished: its memory is clobbered with a sentinel, c2 is resumed and
 * terminates, and nothing should touch c1's bytes in between.
 *
 * This relies on freed coroutines landing in a freelist rather than being
 * returned to the allocator, so main() only registers this test when
 * CONFIG_COROUTINE_POOL is set.
 */
static void test_no_dangling_access(void)
{
    Coroutine *c1;
    Coroutine *c2;
    Coroutine tmp;

    c2 = qemu_coroutine_create(c2_fn, NULL);
    c1 = qemu_coroutine_create(c1_fn, c2);

    /* c1 enters c2; c2 yields back into c1, which then terminates */
    qemu_coroutine_enter(c1);

    /* c1 shouldn't be used any more now; make sure we segfault if it is */
    tmp = *c1;
    memset(c1, 0xff, sizeof(Coroutine));
    qemu_coroutine_enter(c2);

    /* Must restore the coroutine now to avoid corrupted pool */
    *c1 = tmp;
}
| 198 | |
/* Shared state for the locking tests below */
static bool locked; /* true while a test coroutine holds the lock */
static int done;    /* number of test coroutines that have finished */
| 201 | |
| 202 | static void coroutine_fn mutex_fn(void *opaque) |
| 203 | { |
| 204 | CoMutex *m = opaque; |
| 205 | qemu_co_mutex_lock(m); |
| 206 | assert(!locked); |
| 207 | locked = true; |
| 208 | qemu_coroutine_yield(); |
| 209 | locked = false; |
| 210 | qemu_co_mutex_unlock(m); |
| 211 | done++; |
| 212 | } |
| 213 | |
Paolo Bonzini | e70372f | 2018-02-03 10:39:32 -0500 | [diff] [blame] | 214 | static void coroutine_fn lockable_fn(void *opaque) |
| 215 | { |
| 216 | QemuLockable *x = opaque; |
| 217 | qemu_lockable_lock(x); |
| 218 | assert(!locked); |
| 219 | locked = true; |
| 220 | qemu_coroutine_yield(); |
| 221 | locked = false; |
| 222 | qemu_lockable_unlock(x); |
| 223 | done++; |
| 224 | } |
| 225 | |
Paolo Bonzini | 439b6e5 | 2018-02-03 10:39:31 -0500 | [diff] [blame] | 226 | static void do_test_co_mutex(CoroutineEntry *entry, void *opaque) |
| 227 | { |
| 228 | Coroutine *c1 = qemu_coroutine_create(entry, opaque); |
| 229 | Coroutine *c2 = qemu_coroutine_create(entry, opaque); |
| 230 | |
| 231 | done = 0; |
| 232 | qemu_coroutine_enter(c1); |
| 233 | g_assert(locked); |
| 234 | qemu_coroutine_enter(c2); |
| 235 | |
| 236 | /* Unlock queues c2. It is then started automatically when c1 yields or |
| 237 | * terminates. |
| 238 | */ |
| 239 | qemu_coroutine_enter(c1); |
| 240 | g_assert_cmpint(done, ==, 1); |
| 241 | g_assert(locked); |
| 242 | |
| 243 | qemu_coroutine_enter(c2); |
| 244 | g_assert_cmpint(done, ==, 2); |
| 245 | g_assert(!locked); |
| 246 | } |
| 247 | |
| 248 | static void test_co_mutex(void) |
| 249 | { |
| 250 | CoMutex m; |
| 251 | |
| 252 | qemu_co_mutex_init(&m); |
| 253 | do_test_co_mutex(mutex_fn, &m); |
| 254 | } |
| 255 | |
Paolo Bonzini | e70372f | 2018-02-03 10:39:32 -0500 | [diff] [blame] | 256 | static void test_co_mutex_lockable(void) |
| 257 | { |
| 258 | CoMutex m; |
| 259 | CoMutex *null_pointer = NULL; |
| 260 | |
| 261 | qemu_co_mutex_init(&m); |
| 262 | do_test_co_mutex(lockable_fn, QEMU_MAKE_LOCKABLE(&m)); |
| 263 | |
| 264 | g_assert(QEMU_MAKE_LOCKABLE(null_pointer) == NULL); |
| 265 | } |
| 266 | |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 267 | /* |
| 268 | * Check that creation, enter, and return work |
| 269 | */ |
| 270 | |
| 271 | static void coroutine_fn set_and_exit(void *opaque) |
| 272 | { |
| 273 | bool *done = opaque; |
| 274 | |
| 275 | *done = true; |
| 276 | } |
| 277 | |
| 278 | static void test_lifecycle(void) |
| 279 | { |
| 280 | Coroutine *coroutine; |
| 281 | bool done = false; |
| 282 | |
| 283 | /* Create, enter, and return from coroutine */ |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 284 | coroutine = qemu_coroutine_create(set_and_exit, &done); |
| 285 | qemu_coroutine_enter(coroutine); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 286 | g_assert(done); /* expect done to be true (first time) */ |
| 287 | |
| 288 | /* Repeat to check that no state affects this test */ |
| 289 | done = false; |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 290 | coroutine = qemu_coroutine_create(set_and_exit, &done); |
| 291 | qemu_coroutine_enter(coroutine); |
Stefan Hajnoczi | aa7ee42 | 2011-05-10 10:21:18 +0100 | [diff] [blame] | 292 | g_assert(done); /* expect done to be true (second time) */ |
| 293 | } |
| 294 | |
Charlie Shepherd | f8d1dae | 2013-08-08 03:23:14 +0100 | [diff] [blame] | 295 | |
#define RECORD_SIZE 10 /* Leave some room for expansion */
struct coroutine_position {
    int func;  /* which side logged it: 1 = driver, 2 = coroutine (see do_order_test) */
    int state; /* sequence number within that side */
};
static struct coroutine_position records[RECORD_SIZE];
static unsigned record_pos; /* number of records pushed so far */
| 303 | |
| 304 | static void record_push(int func, int state) |
| 305 | { |
| 306 | struct coroutine_position *cp = &records[record_pos++]; |
| 307 | g_assert_cmpint(record_pos, <, RECORD_SIZE); |
| 308 | cp->func = func; |
| 309 | cp->state = state; |
| 310 | } |
| 311 | |
| 312 | static void coroutine_fn co_order_test(void *opaque) |
| 313 | { |
| 314 | record_push(2, 1); |
| 315 | g_assert(qemu_in_coroutine()); |
| 316 | qemu_coroutine_yield(); |
| 317 | record_push(2, 2); |
| 318 | g_assert(qemu_in_coroutine()); |
| 319 | } |
| 320 | |
| 321 | static void do_order_test(void) |
| 322 | { |
| 323 | Coroutine *co; |
| 324 | |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 325 | co = qemu_coroutine_create(co_order_test, NULL); |
Charlie Shepherd | f8d1dae | 2013-08-08 03:23:14 +0100 | [diff] [blame] | 326 | record_push(1, 1); |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 327 | qemu_coroutine_enter(co); |
Charlie Shepherd | f8d1dae | 2013-08-08 03:23:14 +0100 | [diff] [blame] | 328 | record_push(1, 2); |
| 329 | g_assert(!qemu_in_coroutine()); |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 330 | qemu_coroutine_enter(co); |
Charlie Shepherd | f8d1dae | 2013-08-08 03:23:14 +0100 | [diff] [blame] | 331 | record_push(1, 3); |
| 332 | g_assert(!qemu_in_coroutine()); |
| 333 | } |
| 334 | |
| 335 | static void test_order(void) |
| 336 | { |
| 337 | int i; |
| 338 | const struct coroutine_position expected_pos[] = { |
| 339 | {1, 1,}, {2, 1}, {1, 2}, {2, 2}, {1, 3} |
| 340 | }; |
| 341 | do_order_test(); |
| 342 | g_assert_cmpint(record_pos, ==, 5); |
| 343 | for (i = 0; i < record_pos; i++) { |
| 344 | g_assert_cmpint(records[i].func , ==, expected_pos[i].func ); |
| 345 | g_assert_cmpint(records[i].state, ==, expected_pos[i].state); |
| 346 | } |
| 347 | } |
Stefan Hajnoczi | 5e3840c | 2011-05-12 08:27:39 +0100 | [diff] [blame] | 348 | /* |
| 349 | * Lifecycle benchmark |
| 350 | */ |
| 351 | |
| 352 | static void coroutine_fn empty_coroutine(void *opaque) |
| 353 | { |
| 354 | /* Do nothing */ |
| 355 | } |
| 356 | |
| 357 | static void perf_lifecycle(void) |
| 358 | { |
| 359 | Coroutine *coroutine; |
| 360 | unsigned int i, max; |
| 361 | double duration; |
| 362 | |
| 363 | max = 1000000; |
| 364 | |
| 365 | g_test_timer_start(); |
| 366 | for (i = 0; i < max; i++) { |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 367 | coroutine = qemu_coroutine_create(empty_coroutine, NULL); |
| 368 | qemu_coroutine_enter(coroutine); |
Stefan Hajnoczi | 5e3840c | 2011-05-12 08:27:39 +0100 | [diff] [blame] | 369 | } |
| 370 | duration = g_test_timer_elapsed(); |
| 371 | |
| 372 | g_test_message("Lifecycle %u iterations: %f s\n", max, duration); |
| 373 | } |
| 374 | |
Alex Barcelo | 7e849a9 | 2012-02-16 13:14:06 +0100 | [diff] [blame] | 375 | static void perf_nesting(void) |
| 376 | { |
| 377 | unsigned int i, maxcycles, maxnesting; |
| 378 | double duration; |
| 379 | |
Gabriel Kerneis | a903167 | 2013-09-17 18:26:48 +0200 | [diff] [blame] | 380 | maxcycles = 10000; |
Paolo Bonzini | 0270031 | 2013-02-19 11:59:10 +0100 | [diff] [blame] | 381 | maxnesting = 1000; |
Alex Barcelo | 7e849a9 | 2012-02-16 13:14:06 +0100 | [diff] [blame] | 382 | Coroutine *root; |
Alex Barcelo | 7e849a9 | 2012-02-16 13:14:06 +0100 | [diff] [blame] | 383 | |
| 384 | g_test_timer_start(); |
| 385 | for (i = 0; i < maxcycles; i++) { |
Gabriel Kerneis | a903167 | 2013-09-17 18:26:48 +0200 | [diff] [blame] | 386 | NestData nd = { |
| 387 | .n_enter = 0, |
| 388 | .n_return = 0, |
| 389 | .max = maxnesting, |
| 390 | }; |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 391 | root = qemu_coroutine_create(nest, &nd); |
| 392 | qemu_coroutine_enter(root); |
Alex Barcelo | 7e849a9 | 2012-02-16 13:14:06 +0100 | [diff] [blame] | 393 | } |
| 394 | duration = g_test_timer_elapsed(); |
| 395 | |
| 396 | g_test_message("Nesting %u iterations of %u depth each: %f s\n", |
| 397 | maxcycles, maxnesting, duration); |
| 398 | } |
| 399 | |
Gabriel Kerneis | 2fcd15e | 2013-09-17 17:09:39 +0200 | [diff] [blame] | 400 | /* |
| 401 | * Yield benchmark |
| 402 | */ |
| 403 | |
| 404 | static void coroutine_fn yield_loop(void *opaque) |
| 405 | { |
| 406 | unsigned int *counter = opaque; |
| 407 | |
| 408 | while ((*counter) > 0) { |
| 409 | (*counter)--; |
| 410 | qemu_coroutine_yield(); |
| 411 | } |
| 412 | } |
| 413 | |
| 414 | static void perf_yield(void) |
| 415 | { |
| 416 | unsigned int i, maxcycles; |
| 417 | double duration; |
| 418 | |
| 419 | maxcycles = 100000000; |
| 420 | i = maxcycles; |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 421 | Coroutine *coroutine = qemu_coroutine_create(yield_loop, &i); |
Gabriel Kerneis | 2fcd15e | 2013-09-17 17:09:39 +0200 | [diff] [blame] | 422 | |
| 423 | g_test_timer_start(); |
| 424 | while (i > 0) { |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 425 | qemu_coroutine_enter(coroutine); |
Gabriel Kerneis | 2fcd15e | 2013-09-17 17:09:39 +0200 | [diff] [blame] | 426 | } |
| 427 | duration = g_test_timer_elapsed(); |
| 428 | |
| 429 | g_test_message("Yield %u iterations: %f s\n", |
| 430 | maxcycles, duration); |
| 431 | } |
Alex Barcelo | 7e849a9 | 2012-02-16 13:14:06 +0100 | [diff] [blame] | 432 | |
/* Decrement helper; noinline so the benchmark measures a real call. */
static __attribute__((noinline)) void dummy(unsigned *i)
{
    *i -= 1;
}
| 437 | |
/* Plain function-call baseline to compare against coroutine switch cost. */
static void perf_baseline(void)
{
    const unsigned int maxcycles = 100000000;
    unsigned int remaining = maxcycles;
    double duration;

    g_test_timer_start();
    while (remaining > 0) {
        dummy(&remaining);
    }
    duration = g_test_timer_elapsed();

    g_test_message("Function call %u iterations: %f s\n",
                   maxcycles, duration);
}
| 455 | |
/* Coroutine body for perf_cost; noinline so the call is actually made. */
static __attribute__((noinline)) void perf_cost_func(void *opaque)
{
    (void)opaque; /* unused */
    qemu_coroutine_yield();
}
| 460 | |
| 461 | static void perf_cost(void) |
| 462 | { |
| 463 | const unsigned long maxcycles = 40000000; |
| 464 | unsigned long i = 0; |
| 465 | double duration; |
| 466 | unsigned long ops; |
| 467 | Coroutine *co; |
| 468 | |
| 469 | g_test_timer_start(); |
| 470 | while (i++ < maxcycles) { |
Paolo Bonzini | 0b8b875 | 2016-07-04 19:10:01 +0200 | [diff] [blame] | 471 | co = qemu_coroutine_create(perf_cost_func, &i); |
| 472 | qemu_coroutine_enter(co); |
| 473 | qemu_coroutine_enter(co); |
Ming Lei | 61ff8cf | 2014-08-13 18:08:47 +0800 | [diff] [blame] | 474 | } |
| 475 | duration = g_test_timer_elapsed(); |
| 476 | ops = (long)(maxcycles / (duration * 1000)); |
| 477 | |
| 478 | g_test_message("Run operation %lu iterations %f s, %luK operations/s, " |
| 479 | "%luns per coroutine", |
| 480 | maxcycles, |
| 481 | duration, ops, |
Paolo Bonzini | 6d86ae08 | 2014-12-02 12:05:46 +0100 | [diff] [blame] | 482 | (unsigned long)(1000000000.0 * duration / maxcycles)); |
Ming Lei | 61ff8cf | 2014-08-13 18:08:47 +0800 | [diff] [blame] | 483 | } |
| 484 | |
/* Register and run all coroutine tests under the GLib test harness. */
int main(int argc, char **argv)
{
    g_test_init(&argc, &argv, NULL);

    /* This test assumes there is a freelist and marks freed coroutine memory
     * with a sentinel value. If there is no freelist this would legitimately
     * crash, so skip it.
     */
    if (CONFIG_COROUTINE_POOL) {
        g_test_add_func("/basic/no-dangling-access", test_no_dangling_access);
    }

    g_test_add_func("/basic/lifecycle", test_lifecycle);
    g_test_add_func("/basic/yield", test_yield);
    g_test_add_func("/basic/nesting", test_nesting);
    g_test_add_func("/basic/self", test_self);
    g_test_add_func("/basic/entered", test_entered);
    g_test_add_func("/basic/in_coroutine", test_in_coroutine);
    g_test_add_func("/basic/order", test_order);
    g_test_add_func("/locking/co-mutex", test_co_mutex);
    g_test_add_func("/locking/co-mutex/lockable", test_co_mutex_lockable);
    /* Benchmarks run only when the harness is in performance mode */
    if (g_test_perf()) {
        g_test_add_func("/perf/lifecycle", perf_lifecycle);
        g_test_add_func("/perf/nesting", perf_nesting);
        g_test_add_func("/perf/yield", perf_yield);
        g_test_add_func("/perf/function-call", perf_baseline);
        g_test_add_func("/perf/cost", perf_cost);
    }
    return g_test_run();
}