blob: a4c98169c392b7d43439beec22867a213ee737cf [file] [log] [blame]
ths75818252008-07-03 13:41:03 +00001/*
Eric Blake58a6fdc2022-05-11 19:49:24 -05002 * Copyright (C) 2016-2022 Red Hat, Inc.
bellard7a5ca862008-05-27 21:13:40 +00003 * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
4 *
5 * Network Block Device
6 *
7 * This program is free software; you can redistribute it and/or modify
8 * it under the terms of the GNU General Public License as published by
9 * the Free Software Foundation; under version 2 of the License.
10 *
11 * This program is distributed in the hope that it will be useful,
12 * but WITHOUT ANY WARRANTY; without even the implied warranty of
13 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
14 * GNU General Public License for more details.
15 *
16 * You should have received a copy of the GNU General Public License
Blue Swirl8167ee82009-07-16 20:47:01 +000017 * along with this program; if not, see <http://www.gnu.org/licenses/>.
ths75818252008-07-03 13:41:03 +000018 */
bellard7a5ca862008-05-27 21:13:40 +000019
20#ifndef NBD_H
21#define NBD_H
22
Kevin Wolf56ee8622020-09-24 17:26:50 +020023#include "block/export.h"
Daniel P. Berrange1c778ef2016-02-10 18:41:04 +000024#include "io/channel-socket.h"
Daniel P. Berrangef95910f2016-02-10 18:41:11 +000025#include "crypto/tlscreds.h"
Vladimir Sementsov-Ogievskiye6798f02019-01-28 19:58:30 +030026#include "qapi/error.h"
Philippe Mathieu-Daudéfcb9e052022-11-25 18:53:28 +010027#include "qemu/bswap.h"
Nick Thomasc12504c2011-02-22 15:44:53 +000028
Kevin Wolf56ee8622020-09-24 17:26:50 +020029extern const BlockExportDriver blk_exp_nbd;
30
Eric Blakec8a3a1b2016-10-14 13:33:10 -050031/* Handshake phase structs - this struct is passed on the wire */
32
/*
 * Option request header, sent by the client during the handshake's
 * option-haggling phase; @length bytes of option data follow on the wire.
 */
struct NBDOption {
    uint64_t magic; /* NBD_OPTS_MAGIC */
    uint32_t option; /* NBD_OPT_* */
    uint32_t length; /* length of trailing option data, in bytes */
} QEMU_PACKED;
typedef struct NBDOption NBDOption;
Eric Blakec8a3a1b2016-10-14 13:33:10 -050039
/*
 * Server's reply header to a single NBDOption request; @length bytes of
 * reply payload (shape depends on @type) follow on the wire.
 */
struct NBDOptionReply {
    uint64_t magic; /* NBD_REP_MAGIC */
    uint32_t option; /* NBD_OPT_* option being acknowledged */
    uint32_t type; /* NBD_REP_* */
    uint32_t length; /* length of trailing reply payload, in bytes */
} QEMU_PACKED;
typedef struct NBDOptionReply NBDOptionReply;
Eric Blakec8a3a1b2016-10-14 13:33:10 -050047
/*
 * Payload layout of an NBD_REP_META_CONTEXT reply: a context id followed
 * by the (non-NUL-terminated) metadata context name.
 */
typedef struct NBDOptionReplyMetaContext {
    NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
    uint32_t context_id;
    /* metadata context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
53
/* Transmission phase structs
 *
 * Note: these are _NOT_ the same as the network representation of an NBD
 * request and reply!
 */
/* In-memory form of one client request during the transmission phase. */
struct NBDRequest {
    uint64_t handle; /* opaque value echoed back in the matching reply */
    uint64_t from;   /* starting offset of the request, in bytes */
    uint32_t len;    /* length of the request, in bytes */
    uint16_t flags; /* NBD_CMD_FLAG_* */
    uint16_t type; /* NBD_CMD_* */
};
typedef struct NBDRequest NBDRequest;
ths75818252008-07-03 13:41:03 +000067
/* Wire format of a simple (non-structured) server reply. */
typedef struct NBDSimpleReply {
    uint32_t magic;  /* NBD_SIMPLE_REPLY_MAGIC */
    uint32_t error;  /* 0 on success, or one of the NBD_E* errno values */
    uint64_t handle; /* matches the handle of the client's request */
} QEMU_PACKED NBDSimpleReply;
73
/* Header of all structured replies */
typedef struct NBDStructuredReplyChunk {
    uint32_t magic;  /* NBD_STRUCTURED_REPLY_MAGIC */
    uint16_t flags;  /* combination of NBD_REPLY_FLAG_* */
    uint16_t type;   /* NBD_REPLY_TYPE_* */
    uint64_t handle; /* request handle */
    uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
82
/*
 * Union over the two reply wire formats; the anonymous struct aliases the
 * fields shared by both so callers can inspect them before knowing which
 * reply form arrived.
 */
typedef union NBDReply {
    NBDSimpleReply simple;
    NBDStructuredReplyChunk structured;
    struct {
        /* @magic and @handle fields have the same offset and size both in
         * simple reply and structured reply chunk, so let them be accessible
         * without ".simple." or ".structured." specification
         */
        uint32_t magic;
        uint32_t _skip; /* padding over the differing error/flags+type field */
        uint64_t handle;
    } QEMU_PACKED;
} NBDReply;
96
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
    NBDStructuredReplyChunk h; /* h.length >= 9 */
    uint64_t offset; /* file offset the payload bytes apply to */
    /* At least one byte of data payload follows, calculated from h.length */
} QEMU_PACKED NBDStructuredReadData;

/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
typedef struct NBDStructuredReadHole {
    NBDStructuredReplyChunk h; /* h.length == 12 */
    uint64_t offset; /* start of the hole */
    uint32_t length; /* length of the hole, in bytes */
} QEMU_PACKED NBDStructuredReadHole;
Eric Blakebae245d2017-10-27 12:40:28 +0200110
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
    NBDStructuredReplyChunk h; /* h.length >= 6 */
    uint32_t error; /* one of the NBD_E* errno values */
    uint16_t message_length; /* length of human-readable message that follows */
} QEMU_PACKED NBDStructuredError;
117
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
    NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
    uint32_t context_id; /* id negotiated via NBD_OPT_SET_META_CONTEXT */
    /* extents follows */
} QEMU_PACKED NBDStructuredMeta;

/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent {
    uint32_t length; /* length of the extent, in bytes */
    uint32_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent;
130
/* Transmission (export) flags: sent from server to client during handshake,
   but describe what will happen during transmission */
/* Bit positions; the usable masks are defined right below. */
enum {
    NBD_FLAG_HAS_FLAGS_BIT          =  0, /* Flags are there */
    NBD_FLAG_READ_ONLY_BIT          =  1, /* Device is read-only */
    NBD_FLAG_SEND_FLUSH_BIT         =  2, /* Send FLUSH */
    NBD_FLAG_SEND_FUA_BIT           =  3, /* Send FUA (Force Unit Access) */
    NBD_FLAG_ROTATIONAL_BIT         =  4, /* Use elevator algorithm -
                                             rotational media */
    NBD_FLAG_SEND_TRIM_BIT          =  5, /* Send TRIM (discard) */
    NBD_FLAG_SEND_WRITE_ZEROES_BIT  =  6, /* Send WRITE_ZEROES */
    NBD_FLAG_SEND_DF_BIT            =  7, /* Send DF (Do not Fragment) */
    NBD_FLAG_CAN_MULTI_CONN_BIT     =  8, /* Multi-client cache consistent */
    NBD_FLAG_SEND_RESIZE_BIT        =  9, /* Send resize */
    NBD_FLAG_SEND_CACHE_BIT         = 10, /* Send CACHE (prefetch) */
    NBD_FLAG_SEND_FAST_ZERO_BIT     = 11, /* FAST_ZERO flag for WRITE_ZEROES */
};

#define NBD_FLAG_HAS_FLAGS         (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY         (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH        (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA          (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL        (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM         (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF           (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN    (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE       (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE        (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO    (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
Paolo Bonzinibbb74ed2011-09-08 17:24:55 +0200161
/* New-style handshake (global) flags, sent from server to client, and
   control what will happen during handshake phase. */
#define NBD_FLAG_FIXED_NEWSTYLE   (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_NO_ZEROES        (1 << 1) /* End handshake without zeroes. */

/* New-style client flags, sent from client to server to control what happens
   during handshake phase. */
#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_C_NO_ZEROES      (1 << 1) /* End handshake without zeroes. */
Hani Benhabilesf5076b52014-06-07 01:32:31 +0100171
/* Option requests. */
#define NBD_OPT_EXPORT_NAME       (1)
#define NBD_OPT_ABORT             (2)
#define NBD_OPT_LIST              (3)
/* #define NBD_OPT_PEEK_EXPORT    (4) not in use */
#define NBD_OPT_STARTTLS          (5)
#define NBD_OPT_INFO              (6)
#define NBD_OPT_GO                (7)
#define NBD_OPT_STRUCTURED_REPLY  (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT  (10)

/* Option reply types. */
/* Error replies have the top bit set; use unsigned math to avoid UB on bit 31 */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))

#define NBD_REP_ACK          (1)    /* Data sending finished. */
#define NBD_REP_SERVER       (2)    /* Export description. */
#define NBD_REP_INFO         (3)    /* NBD_OPT_INFO/GO. */
#define NBD_REP_META_CONTEXT (4)    /* NBD_OPT_{LIST,SET}_META_CONTEXT */

#define NBD_REP_ERR_UNSUP           NBD_REP_ERR(1)  /* Unknown option */
#define NBD_REP_ERR_POLICY          NBD_REP_ERR(2)  /* Server denied */
#define NBD_REP_ERR_INVALID         NBD_REP_ERR(3)  /* Invalid length */
#define NBD_REP_ERR_PLATFORM        NBD_REP_ERR(4)  /* Not compiled in */
#define NBD_REP_ERR_TLS_REQD        NBD_REP_ERR(5)  /* TLS required */
#define NBD_REP_ERR_UNKNOWN         NBD_REP_ERR(6)  /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN        NBD_REP_ERR(7)  /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8)  /* Need INFO_BLOCK_SIZE */

/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT         0
#define NBD_INFO_NAME           1
#define NBD_INFO_DESCRIPTION    2
#define NBD_INFO_BLOCK_SIZE     3
Daniel P. Berrangef95910f2016-02-10 18:41:11 +0000206
/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA        (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE    (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF         (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE    (1 << 3) /* only one extent in BLOCK_STATUS
                                          * reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO  (1 << 4) /* fail if WRITE_ZEROES is not fast */

/* Supported request types */
enum {
    NBD_CMD_READ = 0,
    NBD_CMD_WRITE = 1,
    NBD_CMD_DISC = 2,          /* disconnect */
    NBD_CMD_FLUSH = 3,
    NBD_CMD_TRIM = 4,
    NBD_CMD_CACHE = 5,
    NBD_CMD_WRITE_ZEROES = 6,
    NBD_CMD_BLOCK_STATUS = 7,
};
226
/* IANA-assigned default TCP port for NBD */
#define NBD_DEFAULT_PORT	10809

/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)

/*
 * Maximum size of a protocol string (export name, metadata context name,
 * etc.).  Use malloc rather than stack allocation for storage of a
 * string.
 */
#define NBD_MAX_STRING_SIZE 4096
Paolo Bonzini3777b092011-10-07 14:35:58 +0200238
/* Two types of reply structures */
#define NBD_SIMPLE_REPLY_MAGIC      0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC  0x668e33ef

/* Structured reply flags */
#define NBD_REPLY_FLAG_DONE          (1 << 0) /* This reply-chunk is last */

/* Structured reply types */
/* Error chunk types have bit 15 set; see nbd_reply_type_is_error() */
#define NBD_REPLY_ERR(value)         ((1 << 15) | (value))

#define NBD_REPLY_TYPE_NONE          0
#define NBD_REPLY_TYPE_OFFSET_DATA   1
#define NBD_REPLY_TYPE_OFFSET_HOLE   2
#define NBD_REPLY_TYPE_BLOCK_STATUS  5
#define NBD_REPLY_TYPE_ERROR         NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET  NBD_REPLY_ERR(2)

/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
#define NBD_STATE_ZERO (1 << 1)

/* Extent flags for qemu:dirty-bitmap in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_DIRTY (1 << 0)

/* No flags needed for qemu:allocation-depth in NBD_REPLY_TYPE_BLOCK_STATUS */
264
/*
 * Error chunk types are distinguished from successful ones by bit 15
 * being set (the same bit NBD_REPLY_ERR() sets).
 */
static inline bool nbd_reply_type_is_error(int type)
{
    return (type & (1 << 15)) != 0;
}
269
/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
 * but only a limited set of errno values is specified in the protocol.
 * Everything else is squashed to EINVAL.
 */
#define NBD_SUCCESS    0
#define NBD_EPERM      1
#define NBD_EIO        5
#define NBD_ENOMEM     12
#define NBD_EINVAL     22
#define NBD_ENOSPC     28
#define NBD_EOVERFLOW  75
#define NBD_ENOTSUP    95
#define NBD_ESHUTDOWN  108
283
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
struct NBDExportInfo {
    /* Set by client before nbd_receive_negotiate() */
    bool request_sizes;   /* ask the server for block-size info */
    char *x_dirty_bitmap; /* x-dirty-bitmap meta context to request, or NULL */

    /* Set by client before nbd_receive_negotiate(), or by server results
     * during nbd_receive_export_list() */
    char *name; /* must be non-NULL */

    /* In-out fields, set by client before nbd_receive_negotiate() and
     * updated by server results during nbd_receive_negotiate() */
    bool structured_reply;
    bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */

    /* Set by server results during nbd_receive_negotiate() and
     * nbd_receive_export_list() */
    uint64_t size;      /* export size in bytes */
    uint16_t flags;     /* transmission flags, NBD_FLAG_* */
    uint32_t min_block; /* minimum I/O block size */
    uint32_t opt_block; /* preferred I/O block size */
    uint32_t max_block; /* maximum I/O request size */

    uint32_t context_id; /* id of the negotiated meta context */

    /* Set by server results during nbd_receive_export_list() */
    char *description;
    int n_contexts;  /* number of entries in @contexts */
    char **contexts; /* names of the server's meta contexts */
};
typedef struct NBDExportInfo NBDExportInfo;
315
/* Client side: negotiate with a server; on success fills in @info (and
 * @outioc when TLS upgraded the channel). Returns 0 on success. */
int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
                          QCryptoTLSCreds *tlscreds,
                          const char *hostname, QIOChannel **outioc,
                          NBDExportInfo *info, Error **errp);
/* Free @count export-list entries allocated by nbd_receive_export_list(). */
void nbd_free_export_list(NBDExportInfo *info, int count);
int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                            const char *hostname, NBDExportInfo **info,
                            Error **errp);
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
             Error **errp);
int nbd_send_request(QIOChannel *ioc, NBDRequest *request);
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
                                   NBDReply *reply, Error **errp);
int nbd_client(int fd);
int nbd_disconnect(int fd);
/* Map an NBD_E* protocol error back to a host errno value. */
int nbd_errno_to_system_errno(int err);

/* Server-side opaque types */
typedef struct NBDExport NBDExport;
typedef struct NBDClient NBDClient;

void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);

AioContext *nbd_export_aio_context(NBDExport *exp);
NBDExport *nbd_export_find(const char *name);

void nbd_client_new(QIOChannelSocket *sioc,
                    QCryptoTLSCreds *tlscreds,
                    const char *tlsauthz,
                    void (*close_fn)(NBDClient *, bool));
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);

void nbd_server_is_qemu_nbd(int max_connections);
bool nbd_server_is_running(void);
int nbd_server_max_connections(void);
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
                      const char *tls_authz, uint32_t max_connections,
                      Error **errp);
void nbd_server_start_options(NbdServerOptions *arg, Error **errp);
Markus Armbrusterbd269eb2017-04-26 09:36:41 +0200355
Eric Blake56dc6822017-10-27 12:40:36 +0200356/* nbd_read
357 * Reads @size bytes from @ioc. Returns 0 on success.
358 */
359static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
Vladimir Sementsov-Ogievskiye6798f02019-01-28 19:58:30 +0300360 const char *desc, Error **errp)
Eric Blake56dc6822017-10-27 12:40:36 +0200361{
Vladimir Sementsov-Ogievskiy795d9462020-07-07 18:50:36 +0200362 ERRP_GUARD();
Vladimir Sementsov-Ogievskiye6798f02019-01-28 19:58:30 +0300363 int ret = qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;
364
365 if (ret < 0) {
366 if (desc) {
367 error_prepend(errp, "Failed to read %s: ", desc);
368 }
Roman Kagan5082fc82021-01-29 10:38:59 +0300369 return ret;
Vladimir Sementsov-Ogievskiye6798f02019-01-28 19:58:30 +0300370 }
371
372 return 0;
Eric Blake56dc6822017-10-27 12:40:36 +0200373}
374
/*
 * Generator for nbd_read16/32/64(): read a big-endian integer of the
 * given width from @ioc and convert it to host byte order in place.
 * Returns 0 on success, or nbd_read()'s negative error code.
 */
#define DEF_NBD_READ_N(bits)                                            \
static inline int nbd_read##bits(QIOChannel *ioc,                       \
                                 uint##bits##_t *val,                   \
                                 const char *desc, Error **errp)        \
{                                                                       \
    int ret = nbd_read(ioc, val, sizeof(*val), desc, errp);             \
                                                                        \
    if (ret < 0) {                                                      \
        return ret;                                                     \
    }                                                                   \
    *val = be##bits##_to_cpu(*val);                                     \
    return 0;                                                           \
}

DEF_NBD_READ_N(16) /* Defines nbd_read16(). */
DEF_NBD_READ_N(32) /* Defines nbd_read32(). */
DEF_NBD_READ_N(64) /* Defines nbd_read64(). */

#undef DEF_NBD_READ_N
393
/* True when @reply carries a simple reply header (see NBDReply union). */
static inline bool nbd_reply_is_simple(NBDReply *reply)
{
    return reply->magic == NBD_SIMPLE_REPLY_MAGIC;
}
398
/* True when @reply carries a structured reply chunk header. */
static inline bool nbd_reply_is_structured(NBDReply *reply)
{
    return reply->magic == NBD_STRUCTURED_REPLY_MAGIC;
}
403
/* Human-readable names for protocol constants, for tracing/errors. */
const char *nbd_reply_type_lookup(uint16_t type);
const char *nbd_opt_lookup(uint32_t opt);
const char *nbd_rep_lookup(uint32_t rep);
const char *nbd_info_lookup(uint16_t info);
const char *nbd_cmd_lookup(uint16_t info);
const char *nbd_err_lookup(int err);

/* nbd/client-connection.c */
typedef struct NBDClientConnection NBDClientConnection;

void nbd_client_connection_enable_retry(NBDClientConnection *conn);

NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
                                               bool do_negotiation,
                                               const char *export_name,
                                               const char *x_dirty_bitmap,
                                               QCryptoTLSCreds *tlscreds,
                                               const char *tlshostname);
void nbd_client_connection_release(NBDClientConnection *conn);

QIOChannel *coroutine_fn
nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
                            bool blocking, Error **errp);

void nbd_co_establish_connection_cancel(NBDClientConnection *conn);
Vladimir Sementsov-Ogievskiy5276c872021-06-15 14:07:05 -0500429
bellard7a5ca862008-05-27 21:13:40 +0000430#endif