block/export: stop using is_external in vhost-user-blk server
vhost-user activity must be suspended during the drained section between
bdrv_drained_begin() and bdrv_drained_end(). This prevents new requests
from interfering with the operations performed while the export is
drained.
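A rough sketch of the caller's side, for orientation (the real drain
API; the comments describe the intended semantics):

    bdrv_drained_begin(bs);  /* notify devices, then poll until all
                              * in-flight requests have completed */
    /* ... graph changes, snapshots, etc. happen here safely ... */
    bdrv_drained_end(bs);    /* notify devices that they may resume */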
Previously this was done using aio_set_fd_handler()'s is_external
argument. In a multi-queue block layer world the aio_disable_external()
API cannot be used, since multiple AioContexts may be processing I/O,
not just one.
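For reference, a minimal sketch of the old mechanism being removed
here: fd handlers were registered with is_external=true, and a drained
section then suppressed every external handler on that one AioContext:

    aio_set_fd_handler(ctx, fd, true /* is_external */,
                       kick_handler, NULL, NULL, NULL, opaque);

    aio_disable_external(ctx);  /* from bdrv_drained_begin() */
    /* ... external handlers are not dispatched here ... */
    aio_enable_external(ctx);   /* from bdrv_drained_end() */

This only works when a single AioContext processes all I/O.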
Switch to BlockDevOps->drained_begin/end() callbacks.
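A minimal sketch of the new pattern (the BlockDevOps fields and
blk_set_dev_ops() are the real API; the callback names here are
illustrative):

    static void my_drained_begin(void *opaque)
    {
        /* stop watching this export's vhost-user kick fds */
    }

    static void my_drained_end(void *opaque)
    {
        /* re-register this export's kick fd handlers */
    }

    static const BlockDevOps ops = {
        .drained_begin = my_drained_begin,
        .drained_end   = my_drained_end,
    };

    blk_set_dev_ops(blk, &ops, opaque);  /* once, at export creation */

Because the callbacks belong to the device rather than to an
AioContext, each export suspends only its own activity, which works
regardless of how many AioContexts are processing I/O.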
Signed-off-by: Stefan Hajnoczi <stefanha@redhat.com>
Reviewed-by: Kevin Wolf <kwolf@redhat.com>
Message-Id: <20230516190238.8401-8-stefanha@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
diff --git a/block/export/vhost-user-blk-server.c b/block/export/vhost-user-blk-server.c
index f51a36a..81b5976 100644
--- a/block/export/vhost-user-blk-server.c
+++ b/block/export/vhost-user-blk-server.c
@@ -212,15 +212,21 @@
{
VuBlkExport *vexp = opaque;
+ /*
+ * The actual attach will happen in vu_blk_drained_end() and we just
+ * restore ctx here.
+ */
vexp->export.ctx = ctx;
- vhost_user_server_attach_aio_context(&vexp->vu_server, ctx);
}
static void blk_aio_detach(void *opaque)
{
VuBlkExport *vexp = opaque;
- vhost_user_server_detach_aio_context(&vexp->vu_server);
+ /*
+ * The actual detach already happened in vu_blk_drained_begin() but from
+ * this point on we must not access ctx anymore.
+ */
vexp->export.ctx = NULL;
}
@@ -272,6 +278,22 @@
vu_config_change_msg(&vexp->vu_server.vu_dev);
}
+/* Called with vexp->export.ctx acquired */
+static void vu_blk_drained_begin(void *opaque)
+{
+ VuBlkExport *vexp = opaque;
+
+ vhost_user_server_detach_aio_context(&vexp->vu_server);
+}
+
+/* Called with vexp->export.blk AioContext acquired */
+static void vu_blk_drained_end(void *opaque)
+{
+ VuBlkExport *vexp = opaque;
+
+ vhost_user_server_attach_aio_context(&vexp->vu_server, vexp->export.ctx);
+}
+
/*
* Ensures that bdrv_drained_begin() waits until in-flight requests complete.
*
@@ -285,6 +307,8 @@
}
static const BlockDevOps vu_blk_dev_ops = {
+ .drained_begin = vu_blk_drained_begin,
+ .drained_end = vu_blk_drained_end,
.drained_poll = vu_blk_drained_poll,
.resize_cb = vu_blk_exp_resize,
};
diff --git a/util/vhost-user-server.c b/util/vhost-user-server.c
index 68c3bf1..a12b2d1 100644
--- a/util/vhost-user-server.c
+++ b/util/vhost-user-server.c
@@ -278,7 +278,7 @@
vu_fd_watch->fd = fd;
vu_fd_watch->cb = cb;
qemu_socket_set_nonblock(fd);
- aio_set_fd_handler(server->ioc->ctx, fd, true, kick_handler,
+ aio_set_fd_handler(server->ioc->ctx, fd, false, kick_handler,
NULL, NULL, NULL, vu_fd_watch);
vu_fd_watch->vu_dev = vu_dev;
vu_fd_watch->pvt = pvt;
@@ -299,7 +299,7 @@
if (!vu_fd_watch) {
return;
}
- aio_set_fd_handler(server->ioc->ctx, fd, true,
+ aio_set_fd_handler(server->ioc->ctx, fd, false,
NULL, NULL, NULL, NULL, NULL);
QTAILQ_REMOVE(&server->vu_fd_watches, vu_fd_watch, next);
@@ -362,7 +362,7 @@
VuFdWatch *vu_fd_watch;
QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
- aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+ aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
NULL, NULL, NULL, NULL, vu_fd_watch);
}
@@ -403,7 +403,7 @@
qio_channel_attach_aio_context(server->ioc, ctx);
QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
- aio_set_fd_handler(ctx, vu_fd_watch->fd, true, kick_handler, NULL,
+ aio_set_fd_handler(ctx, vu_fd_watch->fd, false, kick_handler, NULL,
NULL, NULL, vu_fd_watch);
}
@@ -417,7 +417,7 @@
VuFdWatch *vu_fd_watch;
QTAILQ_FOREACH(vu_fd_watch, &server->vu_fd_watches, next) {
- aio_set_fd_handler(server->ctx, vu_fd_watch->fd, true,
+ aio_set_fd_handler(server->ctx, vu_fd_watch->fd, false,
NULL, NULL, NULL, NULL, vu_fd_watch);
}