/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"
#include "qemu/atomic.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

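/* Create a new bottom half.  Safe to call from any thread: insertion at the
 * list head is protected by ctx->bh_lock, and the write barrier ensures the
 * fields are visible before a concurrent aio_bh_poll() can see the BH.
 */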
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_new(QEMUBH, 1);
    *bh = (QEMUBH){
        .ctx = ctx,
        .cb = cb,
        .opaque = opaque,
    };
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

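/* Invoke the callback of a bottom half. */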
void aio_bh_call(QEMUBH *bh)
{
    bh->cb(bh->opaque);
}

/* Concurrent invocations of aio_bh_poll() on the same AioContext are not
 * allowed.
 */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        /* The atomic_xchg is paired with the one in qemu_bh_schedule.  The
         * implicit memory barrier ensures that the callback sees all writes
         * done by the scheduling thread.  It also ensures that the scheduling
         * thread sees the zero before bh->cb has run, and thus will call
         * aio_notify again if necessary.
         */
        if (!bh->deleted && atomic_xchg(&bh->scheduled, 0)) {
            /* Idle BHs and the notify BH don't count as progress */
            if (!bh->idle && bh != ctx->notify_dummy_bh) {
                ret = 1;
            }
            bh->idle = 0;
            aio_bh_call(bh);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

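/* Schedule a bottom half that does not need to run immediately; idle BHs are
 * only guaranteed to run within 10 ms (see aio_compute_timeout() below).
 */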
void qemu_bh_schedule_idle(QEMUBH *bh)
{
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in the aio_bh_poll.
     */
    atomic_mb_set(&bh->scheduled, 1);
}

void qemu_bh_schedule(QEMUBH *bh)
{
    AioContext *ctx;

    ctx = bh->ctx;
    bh->idle = 0;
    /* The memory barrier implicit in atomic_xchg makes sure that:
     * 1. idle & any writes needed by the callback are done before the
     *    locations are read in the aio_bh_poll.
     * 2. ctx is loaded before scheduled is set and the callback has a chance
     *    to execute.
     */
    if (atomic_xchg(&bh->scheduled, 1) == 0) {
        aio_notify(ctx);
    }
}

/* Asynchronous: scheduling is cancelled, but a callback that has already
 * started running is not interrupted.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* Asynchronous: the bottom half is not freed immediately; aio_bh_poll()
 * unlinks and frees it later, once the BH list is no longer being walked.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}

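/* Illustrative sketch (not part of the original file): the typical lifecycle
 * of a bottom half, assuming a hypothetical callback my_cb and an existing
 * AioContext *ctx:
 *
 *     static void my_cb(void *opaque) { ... }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_cb, opaque);
 *     qemu_bh_schedule(bh);   // runs once, on the next event loop iteration
 *     ...
 *     qemu_bh_delete(bh);     // unlinked and freed later by aio_bh_poll()
 */

/* Compute how long the event loop may block, in nanoseconds: 0 if work is
 * already pending, -1 to wait indefinitely, otherwise the sooner of the
 * 10 ms idle-BH poll interval and the nearest timer deadline.
 */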
int64_t
aio_compute_timeout(AioContext *ctx)
{
    int64_t deadline;
    int timeout = -1;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                timeout = 10000000;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                return 0;
            }
        }
    }

    deadline = timerlistgroup_deadline_ns(&ctx->tlg);
    if (deadline == 0) {
        return 0;
    } else {
        return qemu_soonest_timeout(timeout, deadline);
    }
}

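/* GSource prepare callback: runs before the event loop polls.  Advertise
 * through ctx->notify_me that we are about to block, so aio_notify() will
 * wake us, and report the poll timeout computed from BHs and timers.
 */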
static gboolean
aio_ctx_prepare(GSource *source, gint *timeout)
{
    AioContext *ctx = (AioContext *) source;

    atomic_or(&ctx->notify_me, 1);

    /* We assume there is no timeout already supplied */
    *timeout = qemu_timeout_ns_to_ms(aio_compute_timeout(ctx));

    if (aio_prepare(ctx)) {
        *timeout = 0;
    }

    return *timeout == 0;
}

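/* GSource check callback: runs after poll.  Stop advertising that we block,
 * consume any pending notification, and report whether a BH, handler or
 * timer is ready to be dispatched.
 */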
static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    atomic_and(&ctx->notify_me, ~1);
    aio_notify_accept(ctx);

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

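/* GSource dispatch callback: run ready handlers, bottom halves and timers
 * via aio_dispatch().
 */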
static gboolean
aio_ctx_dispatch(GSource *source,
                 GSourceFunc callback,
                 gpointer user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_dispatch(ctx);
    return true;
}

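/* GSource finalize callback: tear down the AioContext once the last
 * reference to its GSource is dropped.
 */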
static void
aio_ctx_finalize(GSource *source)
{
    AioContext *ctx = (AioContext *) source;

    qemu_bh_delete(ctx->notify_dummy_bh);
    thread_pool_free(ctx->thread_pool);

    qemu_mutex_lock(&ctx->bh_lock);
    while (ctx->first_bh) {
        QEMUBH *next = ctx->first_bh->next;

        /* qemu_bh_delete() must have been called on BHs in this AioContext */
        assert(ctx->first_bh->deleted);

        g_free(ctx->first_bh);
        ctx->first_bh = next;
    }
    qemu_mutex_unlock(&ctx->bh_lock);

    aio_set_event_notifier(ctx, &ctx->notifier, false, NULL);
    event_notifier_cleanup(&ctx->notifier);
    rfifolock_destroy(&ctx->lock);
    qemu_mutex_destroy(&ctx->bh_lock);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

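/* Return this context's thread pool, creating it lazily on first use. */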
ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

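/* Wake up an event loop that may be blocked in poll, e.g. because a bottom
 * half was just scheduled.  The notifier is only written when a poller has
 * advertised interest through ctx->notify_me.
 */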
void aio_notify(AioContext *ctx)
{
    /* Write e.g. bh->scheduled before reading ctx->notify_me.  Pairs
     * with atomic_or in aio_ctx_prepare or atomic_add in aio_poll.
     */
    smp_mb();
    if (ctx->notify_me) {
        event_notifier_set(&ctx->notifier);
        atomic_mb_set(&ctx->notified, true);
    }
}

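/* Consume a pending aio_notify(); called by the woken-up thread so that a
 * stale notification does not cause a spurious extra loop iteration.
 */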
void aio_notify_accept(AioContext *ctx)
{
    if (atomic_xchg(&ctx->notified, false)) {
        event_notifier_test_and_clear(&ctx->notifier);
    }
}

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

static void aio_rfifolock_cb(void *opaque)
{
    AioContext *ctx = opaque;

    /* Kick owner thread in case they are blocked in aio_poll() */
    qemu_bh_schedule(ctx->notify_dummy_bh);
}

static void notify_dummy_bh(void *opaque)
{
    /* Do nothing, we were invoked just to force the event loop to iterate */
}

static void event_notifier_dummy_cb(EventNotifier *e)
{
}

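/* Allocate and initialize a new AioContext.  On failure, set errp and
 * return NULL.
 */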
AioContext *aio_context_new(Error **errp)
{
    int ret;
    AioContext *ctx;
    Error *local_err = NULL;

    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    aio_context_setup(ctx, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        goto fail;
    }
    ret = event_notifier_init(&ctx->notifier, false);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed to initialize event notifier");
        goto fail;
    }
    g_source_set_can_recurse(&ctx->source, true);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           false,
                           (EventNotifierHandler *)
                           event_notifier_dummy_cb);
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    rfifolock_init(&ctx->lock, aio_rfifolock_cb, ctx);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    ctx->notify_dummy_bh = aio_bh_new(ctx, notify_dummy_bh, NULL);

    return ctx;
fail:
    g_source_destroy(&ctx->source);
    return NULL;
}

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}

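/* Acquire/release ownership of the AioContext.  The underlying RFifoLock is
 * recursive and hands the lock over in FIFO order, so contending threads
 * make progress fairly.
 */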
void aio_context_acquire(AioContext *ctx)
{
    rfifolock_lock(&ctx->lock);
}

void aio_context_release(AioContext *ctx)
{
    rfifolock_unlock(&ctx->lock);
}