QEMUBH: make AioContext's bh re-entrant
BHs will be used outside the big lock, so introduce a lock to protect
the writers from one another, i.e. the functions that add and delete
BHs. The lock affects only the writers; a BH's callback does not take
this extra lock. Note that for the same AioContext, aio_bh_poll()
cannot yet run in parallel with itself. A standalone sketch of the
publication pattern used here follows the sign-offs below.
Signed-off-by: Liu Ping Fan <pingfank@linux.vnet.ibm.com>
Reviewed-by: Stefan Hajnoczi <stefanha@redhat.com>
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
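
The pattern above (writers serialized by a mutex, a reader that walks the
list with no lock) is the classic single-writer publication idiom. Here is
a minimal, self-contained sketch of the same idea, offered as an
illustration only: it uses C11 atomics and pthreads instead of QEMU's
qemu_mutex_*() and smp_wmb()/smp_read_barrier_depends() primitives, and the
names node_t, list_head, list_insert and list_sum are invented for the
example, not part of QEMU.

    #include <pthread.h>
    #include <stdatomic.h>
    #include <stdlib.h>

    typedef struct node {
        int payload;
        struct node *next;
    } node_t;

    static _Atomic(node_t *) list_head;
    static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;

    /* Writer: serialized by the lock, as aio_bh_new serializes against
     * the deleting side with ctx->bh_lock.  The release store publishes
     * the fully initialized node, playing the role of smp_wmb() before
     * "ctx->first_bh = bh". */
    static void list_insert(int payload)
    {
        node_t *n = malloc(sizeof(*n));
        n->payload = payload;
        pthread_mutex_lock(&list_lock);
        n->next = atomic_load_explicit(&list_head, memory_order_relaxed);
        atomic_store_explicit(&list_head, n, memory_order_release);
        pthread_mutex_unlock(&list_lock);
    }

    /* Reader: takes no lock.  The acquire load pairs with the release
     * store above; it is stronger than the data-dependency ordering that
     * smp_read_barrier_depends() relies on, but it shows the same
     * reader-side requirement. */
    static int list_sum(void)
    {
        int sum = 0;
        for (node_t *n = atomic_load_explicit(&list_head,
                                              memory_order_acquire);
             n != NULL; n = n->next) {
            sum += n->payload;
        }
        return sum;
    }

Deletion is intentionally left out of the sketch; in the patch it is
deferred to aio_bh_poll's "remove deleted bhs" pass, which takes the same
lock as the writers.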
diff --git a/async.c b/async.c
index 90fe906..5ce3633 100644
--- a/async.c
+++ b/async.c
@@ -47,11 +47,16 @@
     bh->ctx = ctx;
     bh->cb = cb;
     bh->opaque = opaque;
+    qemu_mutex_lock(&ctx->bh_lock);
     bh->next = ctx->first_bh;
+    /* Make sure that the members are ready before putting bh into the list */
+    smp_wmb();
     ctx->first_bh = bh;
+    qemu_mutex_unlock(&ctx->bh_lock);
     return bh;
 }
 
+/* Multiple invocations of aio_bh_poll cannot run concurrently */
 int aio_bh_poll(AioContext *ctx)
 {
     QEMUBH *bh, **bhp, *next;
@@ -61,9 +66,15 @@
     ret = 0;
     for (bh = ctx->first_bh; bh; bh = next) {
+        /* Make sure that fetching bh happens before accessing its members */
+        smp_read_barrier_depends();
         next = bh->next;
         if (!bh->deleted && bh->scheduled) {
             bh->scheduled = 0;
+            /* Paired with the write barrier in bh_schedule to ensure that
+             * idle and the callback's data are read only after the bh has
+             * been scheduled.
+             */
+            smp_rmb();
             if (!bh->idle)
                 ret = 1;
             bh->idle = 0;
@@ -75,6 +86,7 @@
     /* remove deleted bhs */
     if (!ctx->walking_bh) {
+        qemu_mutex_lock(&ctx->bh_lock);
         bhp = &ctx->first_bh;
         while (*bhp) {
             bh = *bhp;
@@ -85,6 +97,7 @@
                 bhp = &bh->next;
             }
         }
+        qemu_mutex_unlock(&ctx->bh_lock);
     }
 
     return ret;
@@ -94,24 +107,38 @@
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 1;
+    /* Make sure that idle & any writes needed by the callback are done
+     * before those locations are read in aio_bh_poll.
+     */
+    smp_wmb();
+    bh->scheduled = 1;
 }
 
 void qemu_bh_schedule(QEMUBH *bh)
 {
     if (bh->scheduled)
         return;
-    bh->scheduled = 1;
     bh->idle = 0;
+    /* Make sure that idle & any writes needed by the callback are done
+     * before those locations are read in aio_bh_poll.
+     */
+    smp_wmb();
+    bh->scheduled = 1;
     aio_notify(bh->ctx);
 }
 
+
+/* This function is asynchronous: it does not synchronize with a callback
+ * that may already be running.
+ */
 void qemu_bh_cancel(QEMUBH *bh)
 {
     bh->scheduled = 0;
 }
 
+/* This function is asynchronous: the bottom half is actually freed later,
+ * when aio_bh_poll removes deleted BHs from the list.
+ */
 void qemu_bh_delete(QEMUBH *bh)
 {
     bh->scheduled = 0;
@@ -176,6 +203,7 @@
     thread_pool_free(ctx->thread_pool);
     aio_set_event_notifier(ctx, &ctx->notifier, NULL, NULL);
     event_notifier_cleanup(&ctx->notifier);
+    qemu_mutex_destroy(&ctx->bh_lock);
     g_array_free(ctx->pollfds, TRUE);
 }
@@ -211,6 +239,7 @@
     ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
     ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
     ctx->thread_pool = NULL;
+    qemu_mutex_init(&ctx->bh_lock);
     event_notifier_init(&ctx->notifier, false);
     aio_set_event_notifier(ctx, &ctx->notifier,
                            (EventNotifierHandler *)
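
To close, here is a hedged, standalone sketch of the smp_wmb()/smp_rmb()
pairing that the patch sets up between qemu_bh_schedule() and aio_bh_poll(),
written with C11 atomics; struct example_bh, example_schedule and
example_poll_one are invented names for this illustration, not QEMU APIs.

    #include <stdatomic.h>
    #include <stdbool.h>

    struct example_bh {
        bool idle;               /* plain data published by scheduling */
        atomic_bool scheduled;   /* the publication flag               */
        void (*cb)(void *opaque);
        void *opaque;
    };

    /* Analogue of qemu_bh_schedule(): write idle (and anything else the
     * callback will read) first, then set scheduled with release
     * semantics, the role smp_wmb() plays in the patch. */
    static void example_schedule(struct example_bh *bh)
    {
        if (atomic_load_explicit(&bh->scheduled, memory_order_relaxed)) {
            return;
        }
        bh->idle = false;
        atomic_store_explicit(&bh->scheduled, true, memory_order_release);
    }

    /* Analogue of the polling side: the acquire exchange clears the flag
     * and orders the later reads of idle and the callback's data after
     * it, which is what "bh->scheduled = 0; smp_rmb();" achieves in
     * aio_bh_poll.  Returns true if a non-idle BH made progress. */
    static bool example_poll_one(struct example_bh *bh)
    {
        if (atomic_exchange_explicit(&bh->scheduled, false,
                                     memory_order_acquire)) {
            bool progress = !bh->idle;
            bh->cb(bh->opaque);
            return progress;
        }
        return false;
    }

The real code can clear bh->scheduled with a plain store because, as the
commit message notes, aio_bh_poll() does not yet run in parallel with
itself for the same AioContext.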