/*
 * Vhost User library
 *
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Victor Kaplansky <victork@redhat.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifndef LIBVHOST_USER_H
#define LIBVHOST_USER_H

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <poll.h>
#include <linux/vhost.h>
#include <pthread.h>
#include "standard-headers/linux/virtio_ring.h"

/* Based on qemu/hw/virtio/vhost-user.c */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_LOG_PAGE 4096

#define VIRTQUEUE_MAX_SIZE 1024

#define VHOST_MEMORY_BASELINE_NREGIONS 8

/*
 * Set a reasonable maximum number of ram slots, which will be supported by
 * any architecture.
 */
#define VHOST_USER_MAX_RAM_SLOTS 32

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

typedef enum VhostSetConfigType {
    VHOST_SET_CONFIG_TYPE_MASTER = 0,
    VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_SLAVE_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_SLAVE_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,

    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_SLAVE_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_VRING_KICK = 35,
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserSlaveRequest {
    VHOST_USER_SLAVE_NONE = 0,
    VHOST_USER_SLAVE_IOTLB_MSG = 1,
    VHOST_USER_SLAVE_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_SLAVE_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_SLAVE_VRING_CALL = 4,
    VHOST_USER_SLAVE_VRING_ERR = 5,
    VHOST_USER_SLAVE_MAX
} VhostUserSlaveRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size)  \
                                   + sizeof(c.flags))

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define VU_PACKED __attribute__((gcc_struct, packed))
#else
# define VU_PACKED __attribute__((packed))
#endif

typedef struct VhostUserMsg {
    int request;

#define VHOST_USER_VERSION_MASK (0x3)
#define VHOST_USER_REPLY_MASK (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */

    union {
#define VHOST_USER_VRING_IDX_MASK (0xff)
#define VHOST_USER_VRING_NOFD_MASK (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg memreg;
        VhostUserLog log;
        VhostUserConfig config;
        VhostUserVringArea area;
        VhostUserInflight inflight;
    } payload;

    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    int fd_num;
    uint8_t *data;
} VU_PACKED VhostUserMsg;

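/*
 * Illustrative sketch (not part of the API): a message on the wire is
 * the VHOST_USER_HDR_SIZE header (request, flags, size) followed by
 * @size payload bytes. Error handling and SCM_RIGHTS fd passing are
 * omitted here; see vu_message_read in libvhost-user.c for the real
 * implementation.
 *
 *     VhostUserMsg vmsg;
 *
 *     read(sock, &vmsg, VHOST_USER_HDR_SIZE);
 *     assert((vmsg.flags & VHOST_USER_VERSION_MASK) == 0x1);
 *     read(sock, &vmsg.payload, vmsg.size);
 */
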
typedef struct VuDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmapped space. */
    uint64_t mmap_offset;
    /* Start address of mmapped space. */
    uint64_t mmap_addr;
} VuDevRegion;

typedef struct VuDev VuDev;

typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
                                  int *do_reply);
typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags);

typedef struct VuDevIface {
    /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
    vu_get_features_cb get_features;
    /* enable vhost implementation features */
    vu_set_features_cb set_features;
    /* get the protocol feature bitmask from the underlying vhost
     * implementation */
    vu_get_features_cb get_protocol_features;
    /* enable protocol features in the underlying vhost implementation. */
    vu_set_features_cb set_protocol_features;
    /* process_msg is called for each vhost-user message received;
     * skip libvhost-user processing if the return value is != 0 */
    vu_process_msg_cb process_msg;
    /* tells when queues can be processed */
    vu_queue_set_started_cb queue_set_started;
    /*
     * Returns true if the queue is processed in order, in which case
     * processing will be resumed from vring.used->idx. This can help
     * to support resuming on unmanaged exit/crash.
     */
    vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
    /* get the config space of the device */
    vu_get_config_cb get_config;
    /* set the config space of the device */
    vu_set_config_cb set_config;
} VuDevIface;

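/*
 * Example (an illustrative sketch; my_get_features and the other
 * my_* callbacks are application-provided, not part of this library):
 *
 *     static const VuDevIface my_iface = {
 *         .get_features = my_get_features,
 *         .set_features = my_set_features,
 *         .queue_set_started = my_queue_set_started,
 *     };
 *
 * Callbacks that are not needed can generally be left NULL; at
 * minimum get_features should be provided, since it is what answers
 * VHOST_USER_GET_FEATURES.
 */
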
typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);

typedef struct VuRing {
    unsigned int num;
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
    uint64_t log_guest_addr;
    uint32_t flags;
} VuRing;

typedef struct VuDescStateSplit {
    /* Indicates whether this descriptor is inflight.
     * Only available for head descriptors. */
    uint8_t inflight;

    /* Padding */
    uint8_t padding[5];

    /* Maintains a list for the last batch of used descriptors.
     * Only available when batching is used for submitting. */
    uint16_t next;

    /* Used to preserve the order of fetching available descriptors.
     * Only available for head descriptors. */
    uint64_t counter;
} VuDescStateSplit;

typedef struct VuVirtqInflight {
    /* The feature flags of this region. Currently initialized to 0. */
    uint64_t features;

    /* The version of this region. It's 1 currently.
     * A zero value indicates a vm reset happened. */
    uint16_t version;

    /* The size of the VuDescStateSplit array. It's equal to the virtqueue
     * size. The slave can get it from the queue_size field of
     * VhostUserInflight. */
    uint16_t desc_num;

    /* The head of the list that tracks the last batch of used descriptors. */
    uint16_t last_batch_head;

    /* Stores the idx value of the used ring. */
    uint16_t used_idx;

    /* Used to track the state of each descriptor in the descriptor table. */
    VuDescStateSplit desc[];
} VuVirtqInflight;

typedef struct VuVirtqInflightDesc {
    uint16_t index;
    uint64_t counter;
} VuVirtqInflightDesc;

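/*
 * Note (a sketch following the field comments above, not an API
 * guarantee): the per-queue inflight region is a VuVirtqInflight
 * header followed by one VuDescStateSplit per descriptor, so for a
 * virtqueue of queue_size descriptors it occupies:
 *
 *     sizeof(VuVirtqInflight) + sizeof(VuDescStateSplit) * queue_size
 */
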
typedef struct VuVirtq {
    VuRing vring;

    VuVirtqInflight *inflight;

    VuVirtqInflightDesc *resubmit_list;

    uint16_t resubmit_num;

    uint64_t counter;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    int inuse;

    vu_queue_handler_cb handler;

    int call_fd;
    int kick_fd;
    int err_fd;
    unsigned int enable;
    bool started;

    /* Guest addresses of our ring */
    struct vhost_vring_addr vra;
} VuVirtq;

enum VuWatchCondition {
    VU_WATCH_IN = POLLIN,
    VU_WATCH_OUT = POLLOUT,
    VU_WATCH_PRI = POLLPRI,
    VU_WATCH_ERR = POLLERR,
    VU_WATCH_HUP = POLLHUP,
};

typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
                                 vu_watch_cb cb, void *data);
typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);

typedef struct VuDevInflightInfo {
    int fd;
    void *addr;
    uint64_t size;
} VuDevInflightInfo;

struct VuDev {
    int sock;
    uint32_t nregions;
    VuDevRegion regions[VHOST_USER_MAX_RAM_SLOTS];
    VuVirtq *vq;
    VuDevInflightInfo inflight_info;
    int log_call_fd;
    /* Must be held while using slave_fd */
    pthread_mutex_t slave_mutex;
    int slave_fd;
    uint64_t log_size;
    uint8_t *log_table;
    uint64_t features;
    uint64_t protocol_features;
    bool broken;
    uint16_t max_queues;

    /*
     * @read_msg: custom method to read vhost-user messages
     *
     * Read data from the vhost-user socket fd and fill in
     * the passed VhostUserMsg *vmsg struct.
     *
     * If reading fails, it should close the file descriptors received
     * as the socket message's ancillary data.
     *
     * For details, please refer to vu_message_read in libvhost-user.c,
     * which is used by default if no custom method is provided when
     * calling vu_init.
     *
     * Returns: true if a vhost-user message was successfully received,
     * false otherwise.
     */
    vu_read_msg_cb read_msg;

    /*
     * @set_watch: add or update the given fd to the watch set,
     * call cb when condition is met.
     */
    vu_set_watch_cb set_watch;

    /* @remove_watch: remove the given fd from the watch set */
    vu_remove_watch_cb remove_watch;

    /*
     * @panic: encountered an unrecoverable error, you may try to re-initialize
     */
    vu_panic_cb panic;
    const VuDevIface *iface;

    /* Postcopy data */
    int postcopy_ufd;
    bool postcopy_listening;
};

typedef struct VuVirtqElement {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    struct iovec *in_sg;
    struct iovec *out_sg;
} VuVirtqElement;

/**
 * vu_init:
 * @dev: a VuDev context
 * @max_queues: maximum number of virtqueues
 * @socket: the socket connected to vhost-user master
 * @panic: a panic callback
 * @read_msg: a custom message reader callback, or NULL to use the default
 * @set_watch: a set_watch callback
 * @remove_watch: a remove_watch callback
 * @iface: a VuDevIface structure with vhost-user device callbacks
 *
 * Initializes a VuDev vhost-user context.
 *
 * Returns: true on success, false on failure.
 */
bool vu_init(VuDev *dev,
             uint16_t max_queues,
             int socket,
             vu_panic_cb panic,
             vu_read_msg_cb read_msg,
             vu_set_watch_cb set_watch,
             vu_remove_watch_cb remove_watch,
             const VuDevIface *iface);

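/*
 * Example (a minimal sketch; conn_fd is an already accepted vhost-user
 * socket, and panic_cb, set_watch_cb, remove_watch_cb and my_iface are
 * application-provided):
 *
 *     VuDev dev;
 *
 *     if (!vu_init(&dev, 1, conn_fd, panic_cb, NULL,
 *                  set_watch_cb, remove_watch_cb, &my_iface)) {
 *         fprintf(stderr, "vu_init failed\n");
 *     }
 *
 * Passing NULL for the read_msg argument selects the default
 * vu_message_read implementation from libvhost-user.c.
 */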

/**
 * vu_deinit:
 * @dev: a VuDev context
 *
 * Cleans up the VuDev context
 */
void vu_deinit(VuDev *dev);

/**
 * vu_dispatch:
 * @dev: a VuDev context
 *
 * Process one vhost-user message.
 *
 * Returns: true on success, false on failure.
 */
bool vu_dispatch(VuDev *dev);

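/*
 * Example (a simplified sketch): a backend typically registers
 * dev->sock via the @set_watch callback and calls vu_dispatch()
 * whenever the socket becomes readable, e.g. from a poll(2) loop:
 *
 *     struct pollfd pfd = { .fd = dev.sock, .events = POLLIN };
 *
 *     while (poll(&pfd, 1, -1) > 0) {
 *         if (!vu_dispatch(&dev)) {
 *             break;
 *         }
 *     }
 *     vu_deinit(&dev);
 */
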
/**
 * vu_gpa_to_va:
 * @dev: a VuDev context
 * @plen: guest memory size; on return, the number of bytes that are
 *        contiguously mapped at @guest_addr
 * @guest_addr: guest address
 *
 * Translate a guest address to a pointer. Returns NULL on failure.
 */
void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);

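/*
 * Example (an illustrative sketch): bounds-checked access to a guest
 * buffer of want bytes at guest physical address gpa:
 *
 *     uint64_t len = want;
 *     uint8_t *buf = vu_gpa_to_va(&dev, &len, gpa);
 *
 *     if (!buf || len < want) {
 *         return;   // unmapped, or not contiguous for want bytes
 *     }
 */
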
/**
 * vu_get_queue:
 * @dev: a VuDev context
 * @qidx: queue index
 *
 * Returns: the queue at index @qidx.
 */
VuVirtq *vu_get_queue(VuDev *dev, int qidx);

/**
 * vu_set_queue_handler:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @handler: the queue handler callback
 *
 * Set the queue handler. This function may be called several times
 * for the same queue. If called with NULL @handler, the handler is
 * removed.
 */
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler);

/**
 * vu_set_queue_host_notifier:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @fd: a file descriptor
 * @size: host page size
 * @offset: notifier offset in @fd file
 *
 * Set queue's host notifier. This function may be called several
 * times for the same queue. If called with -1 @fd, the notifier
 * is removed.
 *
 * Returns: true on success, false on failure.
 */
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset);

/**
 * vu_queue_set_notification:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @enable: state
 *
 * Set whether the queue notifies (via event index or interrupt)
 */
void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);

/**
 * vu_queue_enabled:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is enabled.
 */
bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_started:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is started.
 */
bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);

/**
 * vu_queue_empty:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: true if the queue is empty or not ready.
 */
bool vu_queue_empty(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_notify:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary)
 */
void vu_queue_notify(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_notify_sync:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary)
 * or sync message if possible.
 */
void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_pop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @sz: the size of struct to return (must be >= sizeof(VuVirtqElement))
 *
 * Returns: a VuVirtqElement filled from the queue or NULL. The
 * returned element must be free()-d by the caller.
 */
void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);

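/*
 * Example (a typical processing-loop sketch; my_process is an
 * application-provided function that consumes the out_sg buffers,
 * fills the in_sg buffers, and returns the number of bytes written):
 *
 *     VuVirtqElement *elem;
 *
 *     while ((elem = vu_queue_pop(&dev, vq, sizeof(*elem)))) {
 *         unsigned written = my_process(elem->out_sg, elem->out_num,
 *                                       elem->in_sg, elem->in_num);
 *         vu_queue_push(&dev, vq, elem, written);
 *         free(elem);
 *     }
 *     vu_queue_notify(&dev, vq);
 */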

/**
 * vu_queue_unpop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: The #VuVirtqElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to vu_queue_pop() will refetch the element.
 */
void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                    size_t len);

/**
 * vu_queue_rewind:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Returns: true on success, false if @num is greater than the number of in use
 * elements.
 */
bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);

/**
 * vu_queue_fill:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 * @idx: optional offset for the used ring index (0 in general)
 *
 * Fill the used ring with @elem element.
 */
void vu_queue_fill(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem,
                   unsigned int len, unsigned int idx);

/**
 * vu_queue_push:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 *
 * Helper that combines vu_queue_fill() with a vu_queue_flush().
 */
void vu_queue_push(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem, unsigned int len);

/**
 * vu_queue_flush:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to flush
 *
 * Mark the last number of elements as done (used.idx is updated by
 * num elements).
 */
void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);

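/*
 * Sketch: for a single element, vu_queue_push(dev, vq, elem, len) is
 * equivalent to filling and flushing it:
 *
 *     vu_queue_fill(&dev, vq, elem, len, 0);
 *     vu_queue_flush(&dev, vq, 1);
 *
 * To complete a batch of n elements with a single used.idx update,
 * fill each element at its own offset first, then flush once:
 *
 *     for (i = 0; i < n; i++) {
 *         vu_queue_fill(&dev, vq, elems[i], lens[i], i);
 *     }
 *     vu_queue_flush(&dev, vq, n);
 */
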
/**
 * vu_queue_get_avail_bytes:
 * @vdev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: set to the number of device-writable bytes available
 * @out_bytes: set to the number of device-readable bytes available
 * @max_in_bytes: stop counting after max_in_bytes
 * @max_out_bytes: stop counting after max_out_bytes
 *
 * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
 */
void vu_queue_get_avail_bytes(VuDev *vdev, VuVirtq *vq, unsigned int *in_bytes,
                              unsigned int *out_bytes,
                              unsigned max_in_bytes, unsigned max_out_bytes);

/**
 * vu_queue_avail_bytes:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: expected in bytes
 * @out_bytes: expected out bytes
 *
 * Returns: true if in_bytes <= in_total && out_bytes <= out_total
 */
bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                          unsigned int out_bytes);

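/*
 * Example (an illustrative sketch; req and resp are hypothetical
 * device-specific structures): check that a request can be read and a
 * response written before popping, where "in" means device-writable
 * and "out" means device-readable:
 *
 *     if (!vu_queue_avail_bytes(&dev, vq, sizeof(resp), sizeof(req))) {
 *         return;   // not enough buffer space yet
 *     }
 */
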
#endif /* LIBVHOST_USER_H */