/*
 * Vhost User library
 *
 * Copyright (c) 2016 Red Hat, Inc.
 *
 * Authors:
 *  Victor Kaplansky <victork@redhat.com>
 *  Marc-André Lureau <mlureau@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or
 * later. See the COPYING file in the top-level directory.
 */

#ifndef LIBVHOST_USER_H
#define LIBVHOST_USER_H

#include <stdint.h>
#include <stdbool.h>
#include <stddef.h>
#include <poll.h>
#include <linux/vhost.h>
#include <pthread.h>
#include "standard-headers/linux/virtio_ring.h"

/* Based on qemu/hw/virtio/vhost-user.c */
#define VHOST_USER_F_PROTOCOL_FEATURES 30
#define VHOST_LOG_PAGE 4096

#define VIRTQUEUE_MAX_SIZE 1024

#define VHOST_MEMORY_BASELINE_NREGIONS 8

/*
 * vhost in the kernel usually supports 509 memory slots. 509 used to be the
 * KVM limit: KVM supported 512 slots, but 3 were reserved for internal
 * purposes. This limit is sufficient to support many DIMMs and virtio-mem in
 * "dynamic-memslots" mode.
 */
#define VHOST_USER_MAX_RAM_SLOTS 509

#define VHOST_USER_HDR_SIZE offsetof(VhostUserMsg, payload.u64)

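/*
 * Example (illustrative sketch): every message on the wire is a fixed-size
 * header (request, flags, payload size) followed by `size` bytes of payload;
 * file descriptors travel as ancillary data. `read_all` below is a
 * hypothetical helper that loops over read(2) until the count is met:
 *
 *     VhostUserMsg vmsg;
 *     if (read_all(sock, &vmsg, VHOST_USER_HDR_SIZE) &&
 *         vmsg.size <= sizeof(vmsg.payload) &&
 *         read_all(sock, &vmsg.payload, vmsg.size)) {
 *         // vmsg header and payload are now complete
 *     }
 */
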
typedef enum VhostSetConfigType {
    VHOST_SET_CONFIG_TYPE_FRONTEND = 0,
    VHOST_SET_CONFIG_TYPE_MIGRATION = 1,
} VhostSetConfigType;

/*
 * Maximum size of virtio device config space
 */
#define VHOST_USER_MAX_CONFIG_SIZE 256

enum VhostUserProtocolFeature {
    VHOST_USER_PROTOCOL_F_MQ = 0,
    VHOST_USER_PROTOCOL_F_LOG_SHMFD = 1,
    VHOST_USER_PROTOCOL_F_RARP = 2,
    VHOST_USER_PROTOCOL_F_REPLY_ACK = 3,
    VHOST_USER_PROTOCOL_F_NET_MTU = 4,
    VHOST_USER_PROTOCOL_F_BACKEND_REQ = 5,
    VHOST_USER_PROTOCOL_F_CROSS_ENDIAN = 6,
    VHOST_USER_PROTOCOL_F_CRYPTO_SESSION = 7,
    VHOST_USER_PROTOCOL_F_PAGEFAULT = 8,
    VHOST_USER_PROTOCOL_F_CONFIG = 9,
    VHOST_USER_PROTOCOL_F_BACKEND_SEND_FD = 10,
    VHOST_USER_PROTOCOL_F_HOST_NOTIFIER = 11,
    VHOST_USER_PROTOCOL_F_INFLIGHT_SHMFD = 12,
    VHOST_USER_PROTOCOL_F_INBAND_NOTIFICATIONS = 14,
    VHOST_USER_PROTOCOL_F_CONFIGURE_MEM_SLOTS = 15,
    /* Feature 16 is reserved for VHOST_USER_PROTOCOL_F_STATUS. */
    /* Feature 17 is reserved for VHOST_USER_PROTOCOL_F_XEN_MMAP. */
    VHOST_USER_PROTOCOL_F_SHARED_OBJECT = 18,
    VHOST_USER_PROTOCOL_F_MAX
};

#define VHOST_USER_PROTOCOL_FEATURE_MASK ((1 << VHOST_USER_PROTOCOL_F_MAX) - 1)

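/*
 * Example (illustrative): a backend advertises its supported protocol
 * features as a bitmask built from the enum above;
 * `my_get_protocol_features` is a hypothetical callback registered
 * through the VuDevIface defined later in this header:
 *
 *     static uint64_t my_get_protocol_features(VuDev *dev)
 *     {
 *         return (1ULL << VHOST_USER_PROTOCOL_F_MQ) |
 *                (1ULL << VHOST_USER_PROTOCOL_F_LOG_SHMFD) |
 *                (1ULL << VHOST_USER_PROTOCOL_F_CONFIG);
 *     }
 */
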
typedef enum VhostUserRequest {
    VHOST_USER_NONE = 0,
    VHOST_USER_GET_FEATURES = 1,
    VHOST_USER_SET_FEATURES = 2,
    VHOST_USER_SET_OWNER = 3,
    VHOST_USER_RESET_OWNER = 4,
    VHOST_USER_SET_MEM_TABLE = 5,
    VHOST_USER_SET_LOG_BASE = 6,
    VHOST_USER_SET_LOG_FD = 7,
    VHOST_USER_SET_VRING_NUM = 8,
    VHOST_USER_SET_VRING_ADDR = 9,
    VHOST_USER_SET_VRING_BASE = 10,
    VHOST_USER_GET_VRING_BASE = 11,
    VHOST_USER_SET_VRING_KICK = 12,
    VHOST_USER_SET_VRING_CALL = 13,
    VHOST_USER_SET_VRING_ERR = 14,
    VHOST_USER_GET_PROTOCOL_FEATURES = 15,
    VHOST_USER_SET_PROTOCOL_FEATURES = 16,
    VHOST_USER_GET_QUEUE_NUM = 17,
    VHOST_USER_SET_VRING_ENABLE = 18,
    VHOST_USER_SEND_RARP = 19,
    VHOST_USER_NET_SET_MTU = 20,
    VHOST_USER_SET_BACKEND_REQ_FD = 21,
    VHOST_USER_IOTLB_MSG = 22,
    VHOST_USER_SET_VRING_ENDIAN = 23,
    VHOST_USER_GET_CONFIG = 24,
    VHOST_USER_SET_CONFIG = 25,
    VHOST_USER_CREATE_CRYPTO_SESSION = 26,
    VHOST_USER_CLOSE_CRYPTO_SESSION = 27,
    VHOST_USER_POSTCOPY_ADVISE = 28,
    VHOST_USER_POSTCOPY_LISTEN = 29,
    VHOST_USER_POSTCOPY_END = 30,
    VHOST_USER_GET_INFLIGHT_FD = 31,
    VHOST_USER_SET_INFLIGHT_FD = 32,
    VHOST_USER_GPU_SET_SOCKET = 33,
    VHOST_USER_VRING_KICK = 35,
    VHOST_USER_GET_MAX_MEM_SLOTS = 36,
    VHOST_USER_ADD_MEM_REG = 37,
    VHOST_USER_REM_MEM_REG = 38,
    VHOST_USER_GET_SHARED_OBJECT = 41,
    VHOST_USER_MAX
} VhostUserRequest;

typedef enum VhostUserBackendRequest {
    VHOST_USER_BACKEND_NONE = 0,
    VHOST_USER_BACKEND_IOTLB_MSG = 1,
    VHOST_USER_BACKEND_CONFIG_CHANGE_MSG = 2,
    VHOST_USER_BACKEND_VRING_HOST_NOTIFIER_MSG = 3,
    VHOST_USER_BACKEND_VRING_CALL = 4,
    VHOST_USER_BACKEND_VRING_ERR = 5,
    VHOST_USER_BACKEND_SHARED_OBJECT_ADD = 6,
    VHOST_USER_BACKEND_SHARED_OBJECT_REMOVE = 7,
    VHOST_USER_BACKEND_SHARED_OBJECT_LOOKUP = 8,
    VHOST_USER_BACKEND_MAX
} VhostUserBackendRequest;

typedef struct VhostUserMemoryRegion {
    uint64_t guest_phys_addr;
    uint64_t memory_size;
    uint64_t userspace_addr;
    uint64_t mmap_offset;
} VhostUserMemoryRegion;

#define VHOST_USER_MEM_REG_SIZE (sizeof(VhostUserMemoryRegion))

typedef struct VhostUserMemory {
    uint32_t nregions;
    uint32_t padding;
    VhostUserMemoryRegion regions[VHOST_MEMORY_BASELINE_NREGIONS];
} VhostUserMemory;

typedef struct VhostUserMemRegMsg {
    uint64_t padding;
    VhostUserMemoryRegion region;
} VhostUserMemRegMsg;

typedef struct VhostUserLog {
    uint64_t mmap_size;
    uint64_t mmap_offset;
} VhostUserLog;

typedef struct VhostUserConfig {
    uint32_t offset;
    uint32_t size;
    uint32_t flags;
    uint8_t region[VHOST_USER_MAX_CONFIG_SIZE];
} VhostUserConfig;

static VhostUserConfig c __attribute__ ((unused));
#define VHOST_USER_CONFIG_HDR_SIZE (sizeof(c.offset) \
                                   + sizeof(c.size) \
                                   + sizeof(c.flags))
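
/*
 * Example (illustrative): a VHOST_USER_GET_CONFIG reply carries this
 * 12-byte header (offset, size, flags) followed by `size` bytes of config
 * data, so a frontend asking for 64 bytes of config space should expect a
 * payload of VHOST_USER_CONFIG_HDR_SIZE + 64 == 76 bytes.
 */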

typedef struct VhostUserVringArea {
    uint64_t u64;
    uint64_t size;
    uint64_t offset;
} VhostUserVringArea;

typedef struct VhostUserInflight {
    uint64_t mmap_size;
    uint64_t mmap_offset;
    uint16_t num_queues;
    uint16_t queue_size;
} VhostUserInflight;

#define UUID_LEN 16

typedef struct VhostUserShared {
    unsigned char uuid[UUID_LEN];
} VhostUserShared;

#if defined(_WIN32) && (defined(__x86_64__) || defined(__i386__))
# define VU_PACKED __attribute__((gcc_struct, packed))
#else
# define VU_PACKED __attribute__((packed))
#endif

typedef struct VhostUserMsg {
    int request;

#define VHOST_USER_VERSION_MASK     (0x3)
#define VHOST_USER_REPLY_MASK       (0x1 << 2)
#define VHOST_USER_NEED_REPLY_MASK  (0x1 << 3)
    uint32_t flags;
    uint32_t size; /* the following payload size */

    union {
#define VHOST_USER_VRING_IDX_MASK   (0xff)
#define VHOST_USER_VRING_NOFD_MASK  (0x1 << 8)
        uint64_t u64;
        struct vhost_vring_state state;
        struct vhost_vring_addr addr;
        VhostUserMemory memory;
        VhostUserMemRegMsg memreg;
        VhostUserLog log;
        VhostUserConfig config;
        VhostUserVringArea area;
        VhostUserInflight inflight;
        VhostUserShared object;
    } payload;

    int fds[VHOST_MEMORY_BASELINE_NREGIONS];
    int fd_num;
    uint8_t *data;
} VU_PACKED VhostUserMsg;


typedef struct VuDevRegion {
    /* Guest Physical address. */
    uint64_t gpa;
    /* Memory region size. */
    uint64_t size;
    /* QEMU virtual address (userspace). */
    uint64_t qva;
    /* Starting offset in our mmaped space. */
    uint64_t mmap_offset;
    /* Start address of mmaped space. */
    uint64_t mmap_addr;
} VuDevRegion;

typedef struct VuDev VuDev;

typedef uint64_t (*vu_get_features_cb) (VuDev *dev);
typedef void (*vu_set_features_cb) (VuDev *dev, uint64_t features);
typedef int (*vu_process_msg_cb) (VuDev *dev, VhostUserMsg *vmsg,
                                  int *do_reply);
typedef bool (*vu_read_msg_cb) (VuDev *dev, int sock, VhostUserMsg *vmsg);
typedef void (*vu_queue_set_started_cb) (VuDev *dev, int qidx, bool started);
typedef bool (*vu_queue_is_processed_in_order_cb) (VuDev *dev, int qidx);
typedef int (*vu_get_config_cb) (VuDev *dev, uint8_t *config, uint32_t len);
typedef int (*vu_set_config_cb) (VuDev *dev, const uint8_t *data,
                                 uint32_t offset, uint32_t size,
                                 uint32_t flags);
typedef int (*vu_get_shared_object_cb) (VuDev *dev, const unsigned char *uuid);

typedef struct VuDevIface {
    /* called by VHOST_USER_GET_FEATURES to get the features bitmask */
    vu_get_features_cb get_features;
    /* enable vhost implementation features */
    vu_set_features_cb set_features;
    /*
     * get the protocol feature bitmask from the underlying vhost
     * implementation
     */
    vu_get_features_cb get_protocol_features;
    /* enable protocol features in the underlying vhost implementation. */
    vu_set_features_cb set_protocol_features;
    /*
     * process_msg is called for each vhost-user message received;
     * return a non-zero value to skip libvhost-user's own processing
     */
    vu_process_msg_cb process_msg;
    /* tells when queues can be processed */
    vu_queue_set_started_cb queue_set_started;
    /*
     * Whether the queue is processed in order, in which case it will be
     * resumed to vring.used->idx. This can help to support resuming
     * on unmanaged exit/crash.
     */
    vu_queue_is_processed_in_order_cb queue_is_processed_in_order;
    /* get the config space of the device */
    vu_get_config_cb get_config;
    /* set the config space of the device */
    vu_set_config_cb set_config;
    /* get a virtio shared object from the underlying vhost implementation */
    vu_get_shared_object_cb get_shared_object;
} VuDevIface;
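
/*
 * Example (illustrative sketch): a minimal device interface. The callback
 * names are hypothetical; callbacks not needed by a device (or whose
 * protocol features are not advertised) may be left NULL:
 *
 *     static const VuDevIface my_iface = {
 *         .get_features = my_get_features,
 *         .set_features = my_set_features,
 *         .queue_set_started = my_queue_set_started,
 *     };
 */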

typedef void (*vu_queue_handler_cb) (VuDev *dev, int qidx);

typedef struct VuRing {
    unsigned int num;
    struct vring_desc *desc;
    struct vring_avail *avail;
    struct vring_used *used;
    uint64_t log_guest_addr;
    uint32_t flags;
} VuRing;

typedef struct VuDescStateSplit {
    /*
     * Indicates whether this descriptor is inflight or not.
     * Only available for head descriptors.
     */
    uint8_t inflight;

    /* Padding */
    uint8_t padding[5];

    /*
     * Maintains a list for the last batch of used descriptors.
     * Only available when batching is used for submitting.
     */
    uint16_t next;

    /*
     * Used to preserve the order of fetching available descriptors.
     * Only available for head descriptors.
     */
    uint64_t counter;
} VuDescStateSplit;

typedef struct VuVirtqInflight {
    /* The feature flags of this region. Now it's initialized to 0. */
    uint64_t features;

    /*
     * The version of this region. It's 1 currently.
     * A zero value indicates that a vm reset happened.
     */
    uint16_t version;

    /*
     * The size of the VuDescStateSplit array. It's equal to the virtqueue
     * size. The backend can get it from the queue_size field of
     * VhostUserInflight.
     */
    uint16_t desc_num;

    /* The head of the list that tracks the last batch of used descriptors. */
    uint16_t last_batch_head;

    /* Storing the idx value of the used ring */
    uint16_t used_idx;

    /* Used to track the state of each descriptor in the descriptor table */
    VuDescStateSplit desc[];
} VuVirtqInflight;
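
/*
 * Example (illustrative): the per-queue inflight region is the fixed
 * VuVirtqInflight header followed by one VuDescStateSplit per descriptor,
 * so for a 256-entry virtqueue the region occupies
 * sizeof(VuVirtqInflight) + 256 * sizeof(VuDescStateSplit) bytes of the
 * shared mmap area described by VhostUserInflight.
 */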

typedef struct VuVirtqInflightDesc {
    uint16_t index;
    uint64_t counter;
} VuVirtqInflightDesc;

typedef struct VuVirtq {
    VuRing vring;

    VuVirtqInflight *inflight;

    VuVirtqInflightDesc *resubmit_list;

    uint16_t resubmit_num;

    uint64_t counter;

    /* Next head to pop */
    uint16_t last_avail_idx;

    /* Last avail_idx read from VQ. */
    uint16_t shadow_avail_idx;

    uint16_t used_idx;

    /* Last used index value we have signalled on */
    uint16_t signalled_used;

    /* Whether signalled_used is valid */
    bool signalled_used_valid;

    /* Notification enabled? */
    bool notification;

    unsigned int inuse;

    vu_queue_handler_cb handler;

    int call_fd;
    int kick_fd;
    int err_fd;
    unsigned int enable;
    bool started;

    /* Guest addresses of our ring */
    struct vhost_vring_addr vra;
} VuVirtq;

enum VuWatchCondtion {
    VU_WATCH_IN = POLLIN,
    VU_WATCH_OUT = POLLOUT,
    VU_WATCH_PRI = POLLPRI,
    VU_WATCH_ERR = POLLERR,
    VU_WATCH_HUP = POLLHUP,
};

typedef void (*vu_panic_cb) (VuDev *dev, const char *err);
typedef void (*vu_watch_cb) (VuDev *dev, int condition, void *data);
typedef void (*vu_set_watch_cb) (VuDev *dev, int fd, int condition,
                                 vu_watch_cb cb, void *data);
typedef void (*vu_remove_watch_cb) (VuDev *dev, int fd);

typedef struct VuDevInflightInfo {
    int fd;
    void *addr;
    uint64_t size;
} VuDevInflightInfo;

struct VuDev {
    int sock;
    uint32_t nregions;
    VuDevRegion *regions;
    VuVirtq *vq;
    VuDevInflightInfo inflight_info;
    int log_call_fd;
    /* Must be held while using backend_fd */
    pthread_mutex_t backend_mutex;
    int backend_fd;
    uint64_t log_size;
    uint8_t *log_table;
    uint64_t features;
    uint64_t protocol_features;
    bool broken;
    uint16_t max_queues;

    /*
     * @read_msg: custom method to read a vhost-user message
     *
     * Read data from the vhost-user socket fd and fill in the passed
     * VhostUserMsg *vmsg struct.
     *
     * If reading fails, it should close any file descriptors received
     * as the socket message's ancillary data.
     *
     * For details, refer to vu_message_read in libvhost-user.c, which
     * is used by default if no custom method is provided when calling
     * vu_init.
     *
     * Returns: true if a vhost-user message was successfully received,
     * false otherwise.
     */
    vu_read_msg_cb read_msg;

    /*
     * @set_watch: add or update the given fd in the watch set and
     * call cb when the condition is met.
     */
    vu_set_watch_cb set_watch;

    /* @remove_watch: remove the given fd from the watch set */
    vu_remove_watch_cb remove_watch;

    /*
     * @panic: encountered an unrecoverable error, you may try to re-initialize
     */
    vu_panic_cb panic;
    const VuDevIface *iface;

    /* Postcopy data */
    int postcopy_ufd;
    bool postcopy_listening;
};

typedef struct VuVirtqElement {
    unsigned int index;
    unsigned int out_num;
    unsigned int in_num;
    struct iovec *in_sg;
    struct iovec *out_sg;
} VuVirtqElement;

/**
 * vu_init:
 * @dev: a VuDev context
 * @max_queues: maximum number of virtqueues
 * @socket: the socket connected to the vhost-user frontend
 * @panic: a panic callback
 * @read_msg: custom method to read a vhost-user message, or NULL to use
 *            the default one
 * @set_watch: a set_watch callback
 * @remove_watch: a remove_watch callback
 * @iface: a VuDevIface structure with vhost-user device callbacks
 *
 * Initializes a VuDev vhost-user context.
 *
 * Returns: true on success, false on failure.
 **/
bool vu_init(VuDev *dev,
             uint16_t max_queues,
             int socket,
             vu_panic_cb panic,
             vu_read_msg_cb read_msg,
             vu_set_watch_cb set_watch,
             vu_remove_watch_cb remove_watch,
             const VuDevIface *iface);

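/*
 * Example (illustrative sketch, assuming an application event loop with
 * hypothetical my_panic/my_set_watch/my_remove_watch callbacks and an
 * already-accepted socket fd; a NULL @read_msg selects the built-in
 * reader):
 *
 *     VuDev dev;
 *     if (!vu_init(&dev, 2, sock_fd, my_panic, NULL,
 *                  my_set_watch, my_remove_watch, &my_iface)) {
 *         // initialization failed (e.g. out of memory)
 *     }
 *     // then watch sock_fd and call vu_dispatch(&dev) when it is readable
 */
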
/**
 * vu_deinit:
 * @dev: a VuDev context
 *
 * Cleans up the VuDev context.
 */
void vu_deinit(VuDev *dev);

/**
 * vu_request_to_string: return a string for a vhost message request
 * @req: VhostUserMsg request
 *
 * Returns a const string; do not free.
 */
const char *vu_request_to_string(unsigned int req);

/**
 * vu_dispatch:
 * @dev: a VuDev context
 *
 * Process one vhost-user message.
 *
 * Returns: TRUE on success, FALSE on failure.
 */
bool vu_dispatch(VuDev *dev);

/**
 * vu_gpa_to_va:
 * @dev: a VuDev context
 * @plen: in: number of bytes to map; out: number of bytes actually mapped
 *        (may be smaller if the range crosses a memory region boundary)
 * @guest_addr: guest address
 *
 * Translate a guest address to a pointer. Returns NULL on failure.
 */
void *vu_gpa_to_va(VuDev *dev, uint64_t *plen, uint64_t guest_addr);
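
/*
 * Example (illustrative): translating a descriptor address while checking
 * that the whole range is mapped within a single region:
 *
 *     uint64_t len = desc_len;
 *     void *p = vu_gpa_to_va(dev, &len, desc_addr);
 *     if (!p || len != desc_len) {
 *         // unmapped address, or the range spans two memory regions
 *     }
 */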

/**
 * vu_get_queue:
 * @dev: a VuDev context
 * @qidx: queue index
 *
 * Returns: the queue at index @qidx.
 */
VuVirtq *vu_get_queue(VuDev *dev, int qidx);

/**
 * vu_set_queue_handler:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @handler: the queue handler callback
 *
 * Set the queue handler. This function may be called several times
 * for the same queue. If called with NULL @handler, the handler is
 * removed.
 */
void vu_set_queue_handler(VuDev *dev, VuVirtq *vq,
                          vu_queue_handler_cb handler);

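/*
 * Example (illustrative sketch of a queue handler; `written_len` stands
 * for however many bytes the device wrote into elem->in_sg). Elements
 * returned by vu_queue_pop() are malloc'd and must be free()-d by the
 * caller:
 *
 *     static void my_queue_handler(VuDev *dev, int qidx)
 *     {
 *         VuVirtq *vq = vu_get_queue(dev, qidx);
 *         VuVirtqElement *elem;
 *
 *         while ((elem = vu_queue_pop(dev, vq, sizeof(*elem)))) {
 *             // consume elem->out_sg, fill elem->in_sg ...
 *             vu_queue_push(dev, vq, elem, written_len);
 *             free(elem);
 *         }
 *         vu_queue_notify(dev, vq);
 *     }
 */
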
/**
 * vu_set_queue_host_notifier:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @fd: a file descriptor
 * @size: host page size
 * @offset: notifier offset in @fd file
 *
 * Set the queue's host notifier. This function may be called several
 * times for the same queue. If called with -1 @fd, the notifier
 * is removed.
 *
 * Returns: TRUE on success, FALSE on failure.
 */
bool vu_set_queue_host_notifier(VuDev *dev, VuVirtq *vq, int fd,
                                int size, int offset);

/**
 * vu_lookup_shared_object:
 * @dev: a VuDev context
 * @uuid: UUID of the shared object
 * @dmabuf_fd: output dma-buf file descriptor
 *
 * Look up a virtio shared object (i.e., a dma-buf fd) associated with the
 * received UUID. The result, if found, is stored in the @dmabuf_fd argument.
 *
 * Returns: whether the virtio object was found.
 */
bool vu_lookup_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN],
                             int *dmabuf_fd);

/**
 * vu_add_shared_object:
 * @dev: a VuDev context
 * @uuid: UUID of the shared object
 *
 * Registers this back-end as the exporter for the object associated with
 * the received UUID.
 *
 * Returns: TRUE on success, FALSE on failure.
 */
bool vu_add_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);

/**
 * vu_rm_shared_object:
 * @dev: a VuDev context
 * @uuid: UUID of the shared object
 *
 * Removes a shared object entry (i.e., back-end entry) associated with the
 * received UUID key from the hash table.
 *
 * Returns: TRUE on success, FALSE on failure.
 */
bool vu_rm_shared_object(VuDev *dev, unsigned char uuid[UUID_LEN]);

/**
 * vu_queue_set_notification:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @enable: state
 *
 * Set whether the queue notifies (via event index or interrupt).
 */
void vu_queue_set_notification(VuDev *dev, VuVirtq *vq, int enable);

/**
 * vu_queue_enabled:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is enabled.
 */
bool vu_queue_enabled(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_started:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: whether the queue is started.
 */
bool vu_queue_started(const VuDev *dev, const VuVirtq *vq);

/**
 * vu_queue_empty:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Returns: true if the queue is empty or not ready.
 */
bool vu_queue_empty(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_notify:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary).
 */
void vu_queue_notify(VuDev *dev, VuVirtq *vq);

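/**
 * vu_config_change_msg:
 * @dev: a VuDev context
 *
 * Notify the frontend that the device configuration has changed (sends a
 * VHOST_USER_BACKEND_CONFIG_CHANGE_MSG on the backend channel).
 */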
void vu_config_change_msg(VuDev *dev);

/**
 * vu_queue_notify_sync:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 *
 * Request to notify the queue via callfd (skipped if unnecessary)
 * or sync message if possible.
 */
void vu_queue_notify_sync(VuDev *dev, VuVirtq *vq);

/**
 * vu_queue_pop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @sz: the size of struct to return (must be >= sizeof(VuVirtqElement))
 *
 * Returns: a VuVirtqElement filled from the queue or NULL. The
 * returned element must be free()-d by the caller.
 */
void *vu_queue_pop(VuDev *dev, VuVirtq *vq, size_t sz);

/**
 * vu_queue_unpop:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: The #VuVirtqElement
 * @len: number of bytes written
 *
 * Pretend the most recent element wasn't popped from the virtqueue. The next
 * call to vu_queue_pop() will refetch the element.
 */
void vu_queue_unpop(VuDev *dev, VuVirtq *vq, VuVirtqElement *elem,
                    size_t len);

/**
 * vu_queue_rewind:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to push back
 *
 * Pretend that elements weren't popped from the virtqueue. The next
 * virtqueue_pop() will refetch the oldest element.
 *
 * Returns: true on success, false if @num is greater than the number of
 * in-use elements.
 */
bool vu_queue_rewind(VuDev *dev, VuVirtq *vq, unsigned int num);

/**
 * vu_queue_fill:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 * @idx: optional offset for the used ring index (0 in general)
 *
 * Fill the used ring with @elem element.
 */
void vu_queue_fill(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem,
                   unsigned int len, unsigned int idx);

/**
 * vu_queue_push:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @elem: a VuVirtqElement
 * @len: length in bytes to write
 *
 * Helper that combines vu_queue_fill() with a vu_queue_flush().
 */
void vu_queue_push(VuDev *dev, VuVirtq *vq,
                   const VuVirtqElement *elem, unsigned int len);

/**
 * vu_queue_flush:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @num: number of elements to flush
 *
 * Mark the last @num elements as done (used.idx is updated by @num
 * elements).
 */
void vu_queue_flush(VuDev *dev, VuVirtq *vq, unsigned int num);
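
/*
 * Example (illustrative): completing a batch of elements, filling
 * consecutive used-ring slots and then publishing them with one flush;
 * `elems[]` and `lens[]` are hypothetical arrays of popped elements and
 * their written lengths:
 *
 *     for (unsigned int i = 0; i < n; i++) {
 *         vu_queue_fill(dev, vq, elems[i], lens[i], i);
 *         free(elems[i]);
 *     }
 *     vu_queue_flush(dev, vq, n);
 *     vu_queue_notify(dev, vq);
 */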

/**
 * vu_queue_get_avail_bytes:
 * @vdev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: output: bytes available in device-writable ("in") buffers
 * @out_bytes: output: bytes available in device-readable ("out") buffers
 * @max_in_bytes: stop counting after max_in_bytes
 * @max_out_bytes: stop counting after max_out_bytes
 *
 * Count the number of available bytes, up to max_in_bytes/max_out_bytes.
 */
void vu_queue_get_avail_bytes(VuDev *vdev, VuVirtq *vq, unsigned int *in_bytes,
                              unsigned int *out_bytes,
                              unsigned max_in_bytes, unsigned max_out_bytes);

/**
 * vu_queue_avail_bytes:
 * @dev: a VuDev context
 * @vq: a VuVirtq queue
 * @in_bytes: expected in bytes
 * @out_bytes: expected out bytes
 *
 * Returns: true if in_bytes <= in_total && out_bytes <= out_total
 */
bool vu_queue_avail_bytes(VuDev *dev, VuVirtq *vq, unsigned int in_bytes,
                          unsigned int out_bytes);

#endif /* LIBVHOST_USER_H */