/*
 * Event loop thread
 *
 * Copyright Red Hat Inc., 2013
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qom/object.h"
#include "qom/object_interfaces.h"
#include "qemu/module.h"
#include "block/aio.h"
#include "block/block.h"
#include "sysemu/iothread.h"
#include "qapi/error.h"
#include "qapi/qapi-commands-misc.h"
#include "qemu/error-report.h"
#include "qemu/rcu.h"
#include "qemu/main-loop.h"

typedef ObjectClass IOThreadClass;

#define IOTHREAD_GET_CLASS(obj) \
   OBJECT_GET_CLASS(IOThreadClass, obj, TYPE_IOTHREAD)
#define IOTHREAD_CLASS(klass) \
   OBJECT_CLASS_CHECK(IOThreadClass, klass, TYPE_IOTHREAD)

#ifdef CONFIG_POSIX
/* Benchmark results from 2016 on NVMe SSD drives show max polling times around
 * 16-32 microseconds yield IOPS improvements for both iodepth=1 and iodepth=32
 * workloads.
 */
#define IOTHREAD_POLL_MAX_NS_DEFAULT 32768ULL
#else
#define IOTHREAD_POLL_MAX_NS_DEFAULT 0ULL
#endif

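/*
 * Points to the IOThread that owns the calling thread, or NULL in threads
 * that were not started by iothread_run() (e.g. the main loop thread).
 * qemu_get_current_aio_context() uses it to pick the right AioContext.
 */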
static __thread IOThread *my_iothread;

AioContext *qemu_get_current_aio_context(void)
{
    return my_iothread ? my_iothread->ctx : qemu_get_aio_context();
}

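/*
 * Thread entry point: poll the iothread's AioContext (and optionally run its
 * GLib main loop) until iothread_stop() clears the running flag.
 */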
static void *iothread_run(void *opaque)
{
    IOThread *iothread = opaque;

    rcu_register_thread();
    /*
     * g_main_context_push_thread_default() must be called before anything
     * in this new thread uses glib.
     */
    g_main_context_push_thread_default(iothread->worker_context);
    my_iothread = iothread;
    iothread->thread_id = qemu_get_thread_id();
    qemu_sem_post(&iothread->init_done_sem);

    while (iothread->running) {
        /*
         * Note: functionally, the g_main_loop_run() below could already
         * cover the aio_poll() events, but we can't run the main loop
         * unconditionally because an explicit aio_poll() here is faster
         * than g_main_loop_run() when the gcontext is not needed at all
         * (e.g., pure block layer iothreads). In other words, running the
         * gcontext with the iothread trades some performance for the
         * extra functionality.
         */
        aio_poll(iothread->ctx, true);

        /*
         * We must check the running state again in case it was changed
         * during the previous aio_poll().
         */
        if (iothread->running && atomic_read(&iothread->run_gcontext)) {
            g_main_loop_run(iothread->main_loop);
        }
    }

    g_main_context_pop_thread_default(iothread->worker_context);
    rcu_unregister_thread();
    return NULL;
}

/* Runs in iothread_run() thread */
static void iothread_stop_bh(void *opaque)
{
    IOThread *iothread = opaque;

    iothread->running = false; /* stop iothread_run() */

    if (iothread->main_loop) {
        g_main_loop_quit(iothread->main_loop);
    }
}

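/*
 * Stop an iothread from another thread: schedule iothread_stop_bh() in the
 * iothread and join it.  Calling this more than once is a no-op.
 */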
void iothread_stop(IOThread *iothread)
{
    if (!iothread->ctx || iothread->stopping) {
        return;
    }
    iothread->stopping = true;
    aio_bh_schedule_oneshot(iothread->ctx, iothread_stop_bh, iothread);
    qemu_thread_join(&iothread->thread);
}

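/*
 * Instance init only sets defaults; the thread itself is created later by
 * iothread_complete().
 */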
static void iothread_instance_init(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread->poll_max_ns = IOTHREAD_POLL_MAX_NS_DEFAULT;
    iothread->thread_id = -1;
    qemu_sem_init(&iothread->init_done_sem, 0);
    /* By default, we don't run gcontext */
    atomic_set(&iothread->run_gcontext, 0);
}

static void iothread_instance_finalize(Object *obj)
{
    IOThread *iothread = IOTHREAD(obj);

    iothread_stop(iothread);

    /*
     * Before glib 2.33.10 there is a glib bug: a GSource's context pointer
     * may not be cleared even though the context has already been destroyed
     * (while it should be).  Free the AioContext earlier here to work around
     * that glib bug.
     *
     * This comment can be removed once the minimum supported glib version
     * is raised to 2.33.10.  Until then, free the GSources before destroying
     * any GMainContext.
     */
    if (iothread->ctx) {
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
    }
    if (iothread->worker_context) {
        g_main_context_unref(iothread->worker_context);
        iothread->worker_context = NULL;
        g_main_loop_unref(iothread->main_loop);
        iothread->main_loop = NULL;
    }
    qemu_sem_destroy(&iothread->init_done_sem);
}

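/*
 * Create the iothread's GMainContext, attach the AioContext's GSource to it
 * and create the GMainLoop that iothread_run() runs when run_gcontext is set.
 */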
static void iothread_init_gcontext(IOThread *iothread)
{
    GSource *source;

    iothread->worker_context = g_main_context_new();
    source = aio_get_g_source(iothread_get_aio_context(iothread));
    g_source_attach(source, iothread->worker_context);
    g_source_unref(source);
    iothread->main_loop = g_main_loop_new(iothread->worker_context, TRUE);
}

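/*
 * UserCreatable completion hook: create the AioContext and GMainContext,
 * apply the polling parameters, spawn the thread and wait until it has
 * published its thread id.
 */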
static void iothread_complete(UserCreatable *obj, Error **errp)
{
    Error *local_error = NULL;
    IOThread *iothread = IOTHREAD(obj);
    char *name, *thread_name;

    iothread->stopping = false;
    iothread->running = true;
    iothread->ctx = aio_context_new(&local_error);
    if (!iothread->ctx) {
        error_propagate(errp, local_error);
        return;
    }

    /*
     * Init one GMainContext for the iothread unconditionally, even if
     * it's not used
     */
    iothread_init_gcontext(iothread);

    aio_context_set_poll_params(iothread->ctx,
                                iothread->poll_max_ns,
                                iothread->poll_grow,
                                iothread->poll_shrink,
                                &local_error);
    if (local_error) {
        error_propagate(errp, local_error);
        aio_context_unref(iothread->ctx);
        iothread->ctx = NULL;
        return;
    }

    /* This assumes we are called from a thread with useful CPU affinity for us
     * to inherit.
     */
    name = object_get_canonical_path_component(OBJECT(obj));
    thread_name = g_strdup_printf("IO %s", name);
    qemu_thread_create(&iothread->thread, thread_name, iothread_run,
                       iothread, QEMU_THREAD_JOINABLE);
    g_free(thread_name);
    g_free(name);

    /* Wait for initialization to complete */
    while (iothread->thread_id == -1) {
        qemu_sem_wait(&iothread->init_done_sem);
    }
}

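/*
 * The "poll-max-ns", "poll-grow" and "poll-shrink" properties share one
 * getter and one setter; PollParamInfo records which IOThread field each
 * property maps to.
 */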
typedef struct {
    const char *name;
    ptrdiff_t offset; /* field's byte offset in IOThread struct */
} PollParamInfo;

static PollParamInfo poll_max_ns_info = {
    "poll-max-ns", offsetof(IOThread, poll_max_ns),
};
static PollParamInfo poll_grow_info = {
    "poll-grow", offsetof(IOThread, poll_grow),
};
static PollParamInfo poll_shrink_info = {
    "poll-shrink", offsetof(IOThread, poll_shrink),
};

static void iothread_get_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;

    visit_type_int64(v, name, field, errp);
}

static void iothread_set_poll_param(Object *obj, Visitor *v,
        const char *name, void *opaque, Error **errp)
{
    IOThread *iothread = IOTHREAD(obj);
    PollParamInfo *info = opaque;
    int64_t *field = (void *)iothread + info->offset;
    Error *local_err = NULL;
    int64_t value;

    visit_type_int64(v, name, &value, &local_err);
    if (local_err) {
        goto out;
    }

    if (value < 0) {
        error_setg(&local_err, "%s value must be in range [0, %"PRId64"]",
                   info->name, INT64_MAX);
        goto out;
    }

    *field = value;

    if (iothread->ctx) {
        aio_context_set_poll_params(iothread->ctx,
                                    iothread->poll_max_ns,
                                    iothread->poll_grow,
                                    iothread->poll_shrink,
                                    &local_err);
    }

out:
    error_propagate(errp, local_err);
}

static void iothread_class_init(ObjectClass *klass, void *class_data)
{
    UserCreatableClass *ucc = USER_CREATABLE_CLASS(klass);
    ucc->complete = iothread_complete;

    object_class_property_add(klass, "poll-max-ns", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_max_ns_info, &error_abort);
    object_class_property_add(klass, "poll-grow", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_grow_info, &error_abort);
    object_class_property_add(klass, "poll-shrink", "int",
                              iothread_get_poll_param,
                              iothread_set_poll_param,
                              NULL, &poll_shrink_info, &error_abort);
}

static const TypeInfo iothread_info = {
    .name = TYPE_IOTHREAD,
    .parent = TYPE_OBJECT,
    .class_init = iothread_class_init,
    .instance_size = sizeof(IOThread),
    .instance_init = iothread_instance_init,
    .instance_finalize = iothread_instance_finalize,
    .interfaces = (InterfaceInfo[]) {
        {TYPE_USER_CREATABLE},
        {}
    },
};

static void iothread_register_types(void)
{
    type_register_static(&iothread_info);
}

type_init(iothread_register_types)

char *iothread_get_id(IOThread *iothread)
{
    return object_get_canonical_path_component(OBJECT(iothread));
}

AioContext *iothread_get_aio_context(IOThread *iothread)
{
    return iothread->ctx;
}

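/*
 * object_child_foreach() callback: append an IOThreadInfo entry to the list
 * being built by qmp_query_iothreads() for each IOThread child found.
 */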
static int query_one_iothread(Object *object, void *opaque)
{
    IOThreadInfoList ***prev = opaque;
    IOThreadInfoList *elem;
    IOThreadInfo *info;
    IOThread *iothread;

    iothread = (IOThread *)object_dynamic_cast(object, TYPE_IOTHREAD);
    if (!iothread) {
        return 0;
    }

    info = g_new0(IOThreadInfo, 1);
    info->id = iothread_get_id(iothread);
    info->thread_id = iothread->thread_id;
    info->poll_max_ns = iothread->poll_max_ns;
    info->poll_grow = iothread->poll_grow;
    info->poll_shrink = iothread->poll_shrink;

    elem = g_new0(IOThreadInfoList, 1);
    elem->value = info;
    elem->next = NULL;

    **prev = elem;
    *prev = &elem->next;
    return 0;
}

IOThreadInfoList *qmp_query_iothreads(Error **errp)
{
    IOThreadInfoList *head = NULL;
    IOThreadInfoList **prev = &head;
    Object *container = object_get_objects_root();

    object_child_foreach(container, query_one_iothread, &prev);
    return head;
}

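/*
 * Return the iothread's GMainContext and ask the thread (via aio_notify())
 * to start running its GLib main loop so that attached GSources are
 * dispatched.
 */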
GMainContext *iothread_get_g_main_context(IOThread *iothread)
{
    atomic_set(&iothread->run_gcontext, 1);
    aio_notify(iothread->ctx);
    return iothread->worker_context;
}

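/*
 * Create an IOThread programmatically (not via -object/object-add).  The new
 * object is parented to the internal object root, so it is not visible to
 * qmp_query_iothreads() or iothread_by_id().
 */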
IOThread *iothread_create(const char *id, Error **errp)
{
    Object *obj;

    obj = object_new_with_props(TYPE_IOTHREAD,
                                object_get_internal_root(),
                                id, errp, NULL);

    return IOTHREAD(obj);
}

void iothread_destroy(IOThread *iothread)
{
    object_unparent(OBJECT(iothread));
}

/* Lookup IOThread by its id.  Only finds user-created objects, not internal
 * iothread_create() objects. */
IOThread *iothread_by_id(const char *id)
{
    return IOTHREAD(object_resolve_path_type(id, TYPE_IOTHREAD, NULL));
}