ths | 7581825 | 2008-07-03 13:41:03 +0000 | [diff] [blame] | 1 | /* |
Eric Blake | 58a6fdc | 2022-05-11 19:49:24 -0500 | [diff] [blame] | 2 | * Copyright (C) 2016-2022 Red Hat, Inc. |
bellard | 7a5ca86 | 2008-05-27 21:13:40 +0000 | [diff] [blame] | 3 | * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws> |
| 4 | * |
| 5 | * Network Block Device |
| 6 | * |
| 7 | * This program is free software; you can redistribute it and/or modify |
| 8 | * it under the terms of the GNU General Public License as published by |
| 9 | * the Free Software Foundation; under version 2 of the License. |
| 10 | * |
| 11 | * This program is distributed in the hope that it will be useful, |
| 12 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 13 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 14 | * GNU General Public License for more details. |
| 15 | * |
| 16 | * You should have received a copy of the GNU General Public License |
Blue Swirl | 8167ee8 | 2009-07-16 20:47:01 +0000 | [diff] [blame] | 17 | * along with this program; if not, see <http://www.gnu.org/licenses/>. |
ths | 7581825 | 2008-07-03 13:41:03 +0000 | [diff] [blame] | 18 | */ |
bellard | 7a5ca86 | 2008-05-27 21:13:40 +0000 | [diff] [blame] | 19 | |
| 20 | #ifndef NBD_H |
| 21 | #define NBD_H |
| 22 | |
Kevin Wolf | 56ee862 | 2020-09-24 17:26:50 +0200 | [diff] [blame] | 23 | #include "block/export.h" |
Daniel P. Berrange | 1c778ef | 2016-02-10 18:41:04 +0000 | [diff] [blame] | 24 | #include "io/channel-socket.h" |
Daniel P. Berrange | f95910f | 2016-02-10 18:41:11 +0000 | [diff] [blame] | 25 | #include "crypto/tlscreds.h" |
Vladimir Sementsov-Ogievskiy | e6798f0 | 2019-01-28 19:58:30 +0300 | [diff] [blame] | 26 | #include "qapi/error.h" |
Philippe Mathieu-Daudé | fcb9e05 | 2022-11-25 18:53:28 +0100 | [diff] [blame] | 27 | #include "qemu/bswap.h" |
Nick Thomas | c12504c | 2011-02-22 15:44:53 +0000 | [diff] [blame] | 28 | |
Kevin Wolf | 56ee862 | 2020-09-24 17:26:50 +0200 | [diff] [blame] | 29 | extern const BlockExportDriver blk_exp_nbd; |
| 30 | |
Eric Blake | c8a3a1b | 2016-10-14 13:33:10 -0500 | [diff] [blame] | 31 | /* Handshake phase structs - this struct is passed on the wire */ |
| 32 | |
/*
 * Option request header, sent by the client during option haggling.
 * @length bytes of option-specific data follow this header on the wire.
 */
struct NBDOption {
    uint64_t magic;  /* NBD_OPTS_MAGIC */
    uint32_t option; /* NBD_OPT_* */
    uint32_t length; /* length of option data that follows */
} QEMU_PACKED;
typedef struct NBDOption NBDOption;
Eric Blake | c8a3a1b | 2016-10-14 13:33:10 -0500 | [diff] [blame] | 39 | |
/*
 * Server's reply header to a single option request; @length bytes of
 * reply-specific payload follow this header on the wire.
 */
struct NBDOptionReply {
    uint64_t magic;  /* NBD_REP_MAGIC */
    uint32_t option; /* NBD_OPT_* - the option being answered */
    uint32_t type;   /* NBD_REP_* - success or error category */
    uint32_t length; /* length of reply payload that follows */
} QEMU_PACKED;
typedef struct NBDOptionReply NBDOptionReply;
Eric Blake | c8a3a1b | 2016-10-14 13:33:10 -0500 | [diff] [blame] | 47 | |
/* Reply payload for NBD_OPT_{LIST,SET}_META_CONTEXT */
typedef struct NBDOptionReplyMetaContext {
    NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
    uint32_t context_id; /* id the server will use in BLOCK_STATUS replies */
    /* metadata context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
| 53 | |
Eric Blake | c8a3a1b | 2016-10-14 13:33:10 -0500 | [diff] [blame] | 54 | /* Transmission phase structs |
| 55 | * |
| 56 | * Note: these are _NOT_ the same as the network representation of an NBD |
Paolo Bonzini | 56af2dd | 2016-06-13 11:42:40 +0200 | [diff] [blame] | 57 | * request and reply! |
| 58 | */ |
struct NBDRequest {
    uint64_t handle; /* Opaque id, echoed back in the matching reply */
    uint64_t from;   /* Offset within the export touched by the request */
    uint32_t len;    /* Effect length of the request */
    uint16_t flags;  /* NBD_CMD_FLAG_* */
    uint16_t type;   /* NBD_CMD_* */
};
typedef struct NBDRequest NBDRequest;
ths | 7581825 | 2008-07-03 13:41:03 +0000 | [diff] [blame] | 67 | |
/* On-the-wire header of a simple (non-structured) reply */
typedef struct NBDSimpleReply {
    uint32_t magic;  /* NBD_SIMPLE_REPLY_MAGIC */
    uint32_t error;  /* NBD_E* value, 0 (NBD_SUCCESS) on success */
    uint64_t handle; /* Handle of the request being answered */
} QEMU_PACKED NBDSimpleReply;
| 73 | |
/* Header of all structured replies */
typedef struct NBDStructuredReplyChunk {
    uint32_t magic;  /* NBD_STRUCTURED_REPLY_MAGIC */
    uint16_t flags;  /* combination of NBD_REPLY_FLAG_* */
    uint16_t type;   /* NBD_REPLY_TYPE_* */
    uint64_t handle; /* request handle */
    uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;
| 82 | |
/*
 * Union covering both reply forms; the anonymous struct mirrors the
 * layout shared by both so that @magic (which discriminates the form)
 * and @handle can be read before the form is known.
 */
typedef union NBDReply {
    NBDSimpleReply simple;
    NBDStructuredReplyChunk structured;
    struct {
        /* @magic and @handle fields have the same offset and size both in
         * simple reply and structured reply chunk, so let them be accessible
         * without ".simple." or ".structured." specification
         */
        uint32_t magic;
        uint32_t _skip; /* overlays simple.error / structured.flags+type */
        uint64_t handle;
    } QEMU_PACKED;
} NBDReply;
| 96 | |
/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
    NBDStructuredReplyChunk h; /* h.length >= 9 */
    uint64_t offset; /* offset within the export the data applies to */
    /* At least one byte of data payload follows, calculated from h.length */
} QEMU_PACKED NBDStructuredReadData;
| 103 | |
/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE (no payload follows) */
typedef struct NBDStructuredReadHole {
    NBDStructuredReplyChunk h; /* h.length == 12 */
    uint64_t offset; /* start of the hole within the export */
    uint32_t length; /* length of the hole, reads as zeroes */
} QEMU_PACKED NBDStructuredReadHole;
Eric Blake | bae245d | 2017-10-27 12:40:28 +0200 | [diff] [blame] | 110 | |
/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
    NBDStructuredReplyChunk h; /* h.length >= 6 */
    uint32_t error; /* NBD_E* value */
    uint16_t message_length; /* human-readable message of this size follows */
} QEMU_PACKED NBDStructuredError;
| 117 | |
/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
    NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
    uint32_t context_id; /* id negotiated via NBD_OPT_SET_META_CONTEXT */
    /* NBDExtent extents follow */
} QEMU_PACKED NBDStructuredMeta;
| 124 | |
/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent {
    uint32_t length; /* length of the extent in bytes */
    uint32_t flags;  /* NBD_STATE_*, meaning depends on the context */
} QEMU_PACKED NBDExtent;
| 130 | |
/* Transmission (export) flags: sent from server to client during handshake,
   but describe what will happen during transmission */
enum {
    NBD_FLAG_HAS_FLAGS_BIT          =  0, /* Flags are there */
    NBD_FLAG_READ_ONLY_BIT          =  1, /* Device is read-only */
    NBD_FLAG_SEND_FLUSH_BIT         =  2, /* Send FLUSH */
    NBD_FLAG_SEND_FUA_BIT           =  3, /* Send FUA (Force Unit Access) */
    NBD_FLAG_ROTATIONAL_BIT         =  4, /* Use elevator algorithm -
                                             rotational media */
    NBD_FLAG_SEND_TRIM_BIT          =  5, /* Send TRIM (discard) */
    NBD_FLAG_SEND_WRITE_ZEROES_BIT  =  6, /* Send WRITE_ZEROES */
    NBD_FLAG_SEND_DF_BIT            =  7, /* Send DF (Do not Fragment) */
    NBD_FLAG_CAN_MULTI_CONN_BIT     =  8, /* Multi-client cache consistent */
    NBD_FLAG_SEND_RESIZE_BIT        =  9, /* Send resize */
    NBD_FLAG_SEND_CACHE_BIT         = 10, /* Send CACHE (prefetch) */
    NBD_FLAG_SEND_FAST_ZERO_BIT     = 11, /* FAST_ZERO flag for WRITE_ZEROES */
};

/* Mask forms of the bit numbers above, as they appear on the wire */
#define NBD_FLAG_HAS_FLAGS         (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY         (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH        (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA          (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL        (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM         (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF           (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN    (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE       (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE        (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO    (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)

/* New-style handshake (global) flags, sent from server to client, and
   control what will happen during handshake phase. */
#define NBD_FLAG_FIXED_NEWSTYLE   (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_NO_ZEROES        (1 << 1) /* End handshake without zeroes. */

/* New-style client flags, sent from client to server to control what happens
   during handshake phase. */
#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_C_NO_ZEROES      (1 << 1) /* End handshake without zeroes. */
Hani Benhabiles | f5076b5 | 2014-06-07 01:32:31 +0100 | [diff] [blame] | 171 | |
/* Option requests, sent by the client during the handshake. */
#define NBD_OPT_EXPORT_NAME       (1)
#define NBD_OPT_ABORT             (2)
#define NBD_OPT_LIST              (3)
/* #define NBD_OPT_PEEK_EXPORT    (4) not in use */
#define NBD_OPT_STARTTLS          (5)
#define NBD_OPT_INFO              (6)
#define NBD_OPT_GO                (7)
#define NBD_OPT_STRUCTURED_REPLY  (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT  (10)

/* Option reply types; errors are distinguished by the top bit being set. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))

#define NBD_REP_ACK          (1)    /* Data sending finished. */
#define NBD_REP_SERVER       (2)    /* Export description. */
#define NBD_REP_INFO         (3)    /* NBD_OPT_INFO/GO. */
#define NBD_REP_META_CONTEXT (4)    /* NBD_OPT_{LIST,SET}_META_CONTEXT */

#define NBD_REP_ERR_UNSUP           NBD_REP_ERR(1)  /* Unknown option */
#define NBD_REP_ERR_POLICY          NBD_REP_ERR(2)  /* Server denied */
#define NBD_REP_ERR_INVALID         NBD_REP_ERR(3)  /* Invalid length */
#define NBD_REP_ERR_PLATFORM        NBD_REP_ERR(4)  /* Not compiled in */
#define NBD_REP_ERR_TLS_REQD        NBD_REP_ERR(5)  /* TLS required */
#define NBD_REP_ERR_UNKNOWN         NBD_REP_ERR(6)  /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN        NBD_REP_ERR(7)  /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8)  /* Need INFO_BLOCK_SIZE */

/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT         0
#define NBD_INFO_NAME           1
#define NBD_INFO_DESCRIPTION    2
#define NBD_INFO_BLOCK_SIZE     3

/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA        (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE    (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF         (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE    (1 << 3) /* only one extent in BLOCK_STATUS
                                          * reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO  (1 << 4) /* fail if WRITE_ZEROES is not fast */
Hani Benhabiles | f5076b5 | 2014-06-07 01:32:31 +0100 | [diff] [blame] | 214 | |
/* Supported request types (NBDRequest.type values) */
enum {
    NBD_CMD_READ = 0,
    NBD_CMD_WRITE = 1,
    NBD_CMD_DISC = 2,           /* Disconnect */
    NBD_CMD_FLUSH = 3,
    NBD_CMD_TRIM = 4,           /* Discard */
    NBD_CMD_CACHE = 5,          /* Prefetch */
    NBD_CMD_WRITE_ZEROES = 6,
    NBD_CMD_BLOCK_STATUS = 7,
};
| 226 | |
/* IANA-style default TCP port for NBD */
#define NBD_DEFAULT_PORT	10809

/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)

/*
 * Maximum size of a protocol string (export name, metadata context name,
 * etc.).  Use malloc rather than stack allocation for storage of a
 * string.
 */
#define NBD_MAX_STRING_SIZE 4096

/* Two types of reply structures */
#define NBD_SIMPLE_REPLY_MAGIC      0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC  0x668e33ef

/* Structured reply flags */
#define NBD_REPLY_FLAG_DONE      (1 << 0) /* This reply-chunk is last */

/* Structured reply types; errors are distinguished by bit 15 being set */
#define NBD_REPLY_ERR(value)         ((1 << 15) | (value))

#define NBD_REPLY_TYPE_NONE          0
#define NBD_REPLY_TYPE_OFFSET_DATA   1
#define NBD_REPLY_TYPE_OFFSET_HOLE   2
#define NBD_REPLY_TYPE_BLOCK_STATUS  5
#define NBD_REPLY_TYPE_ERROR         NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET  NBD_REPLY_ERR(2)

/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
#define NBD_STATE_ZERO (1 << 1)

/* Extent flags for qemu:dirty-bitmap in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_DIRTY (1 << 0)

/* No flags needed for qemu:allocation-depth in NBD_REPLY_TYPE_BLOCK_STATUS */
| 264 | |
/*
 * Return true if @type names one of the NBD_REPLY_TYPE_ERROR* chunk
 * types; all error chunk types carry bit 15 (see NBD_REPLY_ERR).
 */
static inline bool nbd_reply_type_is_error(int type)
{
    return (type & (1 << 15)) != 0;
}
| 269 | |
/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
 * but only a limited set of errno values is specified in the protocol.
 * Everything else is squashed to EINVAL.
 */
#define NBD_SUCCESS    0
#define NBD_EPERM      1
#define NBD_EIO        5
#define NBD_ENOMEM     12
#define NBD_EINVAL     22
#define NBD_ENOSPC     28
#define NBD_EOVERFLOW  75
#define NBD_ENOTSUP    95
#define NBD_ESHUTDOWN  108
| 283 | |
/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
struct NBDExportInfo {
    /* Set by client before nbd_receive_negotiate() */
    bool request_sizes;    /* request block size info from the server */
    char *x_dirty_bitmap;  /* dirty bitmap context name, may be NULL */

    /* Set by client before nbd_receive_negotiate(), or by server results
     * during nbd_receive_export_list() */
    char *name; /* must be non-NULL */

    /* In-out fields, set by client before nbd_receive_negotiate() and
     * updated by server results during nbd_receive_negotiate() */
    bool structured_reply;
    bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */

    /* Set by server results during nbd_receive_negotiate() and
     * nbd_receive_export_list() */
    uint64_t size;      /* export size in bytes */
    uint16_t flags;     /* transmission flags, NBD_FLAG_* */
    uint32_t min_block; /* block size constraints; 0 when not reported */
    uint32_t opt_block;
    uint32_t max_block;

    uint32_t context_id; /* id of the negotiated metadata context */

    /* Set by server results during nbd_receive_export_list() */
    char *description;
    int n_contexts;
    char **contexts;
};
typedef struct NBDExportInfo NBDExportInfo;
| 315 | |
Vladimir Sementsov-Ogievskiy | a8e2bb6 | 2019-06-18 14:43:21 +0300 | [diff] [blame] | 316 | int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc, |
| 317 | QCryptoTLSCreds *tlscreds, |
Eric Blake | 6dc1667 | 2019-01-17 13:36:46 -0600 | [diff] [blame] | 318 | const char *hostname, QIOChannel **outioc, |
| 319 | NBDExportInfo *info, Error **errp); |
Eric Blake | d21a2d3 | 2019-01-17 13:36:54 -0600 | [diff] [blame] | 320 | void nbd_free_export_list(NBDExportInfo *info, int count); |
| 321 | int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds, |
| 322 | const char *hostname, NBDExportInfo **info, |
| 323 | Error **errp); |
Eric Blake | 004a89f | 2017-07-07 15:30:41 -0500 | [diff] [blame] | 324 | int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info, |
Vladimir Sementsov-Ogievskiy | be41c10 | 2017-05-26 14:09:13 +0300 | [diff] [blame] | 325 | Error **errp); |
Vladimir Sementsov-Ogievskiy | 490dc5e | 2017-08-04 18:14:27 +0300 | [diff] [blame] | 326 | int nbd_send_request(QIOChannel *ioc, NBDRequest *request); |
Kevin Wolf | d3bd5b9 | 2019-02-18 14:56:01 +0100 | [diff] [blame] | 327 | int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc, |
| 328 | NBDReply *reply, Error **errp); |
Jes Sorensen | 0a4eb86 | 2010-08-31 09:30:33 +0200 | [diff] [blame] | 329 | int nbd_client(int fd); |
bellard | 7a5ca86 | 2008-05-27 21:13:40 +0000 | [diff] [blame] | 330 | int nbd_disconnect(int fd); |
Eric Blake | dd68944 | 2017-10-27 12:40:27 +0200 | [diff] [blame] | 331 | int nbd_errno_to_system_errno(int err); |
bellard | 7a5ca86 | 2008-05-27 21:13:40 +0000 | [diff] [blame] | 332 | |
Paolo Bonzini | af49bbb | 2011-09-19 14:03:37 +0200 | [diff] [blame] | 333 | typedef struct NBDExport NBDExport; |
Paolo Bonzini | 1743b51 | 2011-09-19 14:33:23 +0200 | [diff] [blame] | 334 | typedef struct NBDClient NBDClient; |
Paolo Bonzini | af49bbb | 2011-09-19 14:03:37 +0200 | [diff] [blame] | 335 | |
Kevin Wolf | 9b562c6 | 2020-09-24 17:26:53 +0200 | [diff] [blame] | 336 | void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk); |
Paolo Bonzini | ce33967 | 2012-09-18 13:17:52 +0200 | [diff] [blame] | 337 | |
Eric Blake | 61bc846 | 2019-09-16 21:39:17 -0500 | [diff] [blame] | 338 | AioContext *nbd_export_aio_context(NBDExport *exp); |
Paolo Bonzini | ee0a19e | 2012-08-22 15:59:23 +0200 | [diff] [blame] | 339 | NBDExport *nbd_export_find(const char *name); |
Paolo Bonzini | ee0a19e | 2012-08-22 15:59:23 +0200 | [diff] [blame] | 340 | |
Vladimir Sementsov-Ogievskiy | 7f7dfe2 | 2018-10-03 20:02:28 +0300 | [diff] [blame] | 341 | void nbd_client_new(QIOChannelSocket *sioc, |
Daniel P. Berrange | f95910f | 2016-02-10 18:41:11 +0000 | [diff] [blame] | 342 | QCryptoTLSCreds *tlscreds, |
Daniel P. Berrange | b25e12d | 2019-02-27 16:20:33 +0000 | [diff] [blame] | 343 | const char *tlsauthz, |
Eric Blake | 0c9390d | 2017-06-08 17:26:17 -0500 | [diff] [blame] | 344 | void (*close_fn)(NBDClient *, bool)); |
Paolo Bonzini | ce33967 | 2012-09-18 13:17:52 +0200 | [diff] [blame] | 345 | void nbd_client_get(NBDClient *client); |
| 346 | void nbd_client_put(NBDClient *client); |
Paolo Bonzini | af49bbb | 2011-09-19 14:03:37 +0200 | [diff] [blame] | 347 | |
Eric Blake | a5fced4 | 2022-05-11 19:49:23 -0500 | [diff] [blame] | 348 | void nbd_server_is_qemu_nbd(int max_connections); |
Kevin Wolf | 5b1cb49 | 2020-09-24 17:27:12 +0200 | [diff] [blame] | 349 | bool nbd_server_is_running(void); |
Eric Blake | 58a6fdc | 2022-05-11 19:49:24 -0500 | [diff] [blame] | 350 | int nbd_server_max_connections(void); |
Markus Armbruster | bd269eb | 2017-04-26 09:36:41 +0200 | [diff] [blame] | 351 | void nbd_server_start(SocketAddress *addr, const char *tls_creds, |
Kevin Wolf | 1c8222b | 2020-09-24 17:26:54 +0200 | [diff] [blame] | 352 | const char *tls_authz, uint32_t max_connections, |
| 353 | Error **errp); |
Kevin Wolf | eed8b69 | 2020-02-24 15:29:57 +0100 | [diff] [blame] | 354 | void nbd_server_start_options(NbdServerOptions *arg, Error **errp); |
Markus Armbruster | bd269eb | 2017-04-26 09:36:41 +0200 | [diff] [blame] | 355 | |
/* nbd_read
 * Reads @size bytes from @ioc into @buffer.
 * Returns 0 on success, -EIO on failure; on failure *errp is set, with
 * @desc (if non-NULL) prepended to identify what was being read.
 */
static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
                           const char *desc, Error **errp)
{
    /* Guarantee a non-NULL *errp so error_prepend() below is safe */
    ERRP_GUARD();
    int ret = qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;

    if (ret < 0) {
        if (desc) {
            error_prepend(errp, "Failed to read %s: ", desc);
        }
        return ret;
    }

    return 0;
}
| 374 | |
/*
 * DEF_NBD_READ_N(bits) defines nbd_read<bits>(): read an unsigned
 * <bits>-bit integer from @ioc and convert it from big-endian wire
 * order to host order (via be<bits>_to_cpu).  Returns 0 on success,
 * or nbd_read()'s negative error value on failure (with *errp set,
 * prefixed by @desc if non-NULL).
 */
#define DEF_NBD_READ_N(bits)                                            \
static inline int nbd_read##bits(QIOChannel *ioc,                       \
                                 uint##bits##_t *val,                   \
                                 const char *desc, Error **errp)        \
{                                                                       \
    int ret = nbd_read(ioc, val, sizeof(*val), desc, errp);             \
    if (ret < 0) {                                                      \
        return ret;                                                     \
    }                                                                   \
    *val = be##bits##_to_cpu(*val);                                     \
    return 0;                                                           \
}

DEF_NBD_READ_N(16) /* Defines nbd_read16(). */
DEF_NBD_READ_N(32) /* Defines nbd_read32(). */
DEF_NBD_READ_N(64) /* Defines nbd_read64(). */

#undef DEF_NBD_READ_N
| 393 | |
Vladimir Sementsov-Ogievskiy | d2febed | 2017-10-27 12:40:35 +0200 | [diff] [blame] | 394 | static inline bool nbd_reply_is_simple(NBDReply *reply) |
| 395 | { |
| 396 | return reply->magic == NBD_SIMPLE_REPLY_MAGIC; |
| 397 | } |
| 398 | |
| 399 | static inline bool nbd_reply_is_structured(NBDReply *reply) |
| 400 | { |
| 401 | return reply->magic == NBD_STRUCTURED_REPLY_MAGIC; |
| 402 | } |
| 403 | |
Vladimir Sementsov-Ogievskiy | f140e30 | 2017-10-27 12:40:37 +0200 | [diff] [blame] | 404 | const char *nbd_reply_type_lookup(uint16_t type); |
Vladimir Sementsov-Ogievskiy | 757a0d0 | 2018-11-02 18:11:51 +0300 | [diff] [blame] | 405 | const char *nbd_opt_lookup(uint32_t opt); |
| 406 | const char *nbd_rep_lookup(uint32_t rep); |
| 407 | const char *nbd_info_lookup(uint16_t info); |
| 408 | const char *nbd_cmd_lookup(uint16_t info); |
| 409 | const char *nbd_err_lookup(int err); |
Vladimir Sementsov-Ogievskiy | f140e30 | 2017-10-27 12:40:37 +0200 | [diff] [blame] | 410 | |
Vladimir Sementsov-Ogievskiy | 5276c87 | 2021-06-15 14:07:05 -0500 | [diff] [blame] | 411 | /* nbd/client-connection.c */ |
| 412 | typedef struct NBDClientConnection NBDClientConnection; |
| 413 | |
Vladimir Sementsov-Ogievskiy | e0e67cb | 2021-06-10 13:07:50 +0300 | [diff] [blame] | 414 | void nbd_client_connection_enable_retry(NBDClientConnection *conn); |
| 415 | |
Vladimir Sementsov-Ogievskiy | 130d49b | 2021-06-10 13:07:49 +0300 | [diff] [blame] | 416 | NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr, |
| 417 | bool do_negotiation, |
| 418 | const char *export_name, |
| 419 | const char *x_dirty_bitmap, |
Daniel P. Berrangé | 046f98d | 2022-03-04 19:36:00 +0000 | [diff] [blame] | 420 | QCryptoTLSCreds *tlscreds, |
| 421 | const char *tlshostname); |
Vladimir Sementsov-Ogievskiy | 5276c87 | 2021-06-15 14:07:05 -0500 | [diff] [blame] | 422 | void nbd_client_connection_release(NBDClientConnection *conn); |
| 423 | |
Vladimir Sementsov-Ogievskiy | 43cb34d | 2021-06-10 13:07:56 +0300 | [diff] [blame] | 424 | QIOChannel *coroutine_fn |
Vladimir Sementsov-Ogievskiy | 130d49b | 2021-06-10 13:07:49 +0300 | [diff] [blame] | 425 | nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info, |
Vladimir Sementsov-Ogievskiy | 97cf892 | 2021-06-10 13:07:59 +0300 | [diff] [blame] | 426 | bool blocking, Error **errp); |
Vladimir Sementsov-Ogievskiy | 5276c87 | 2021-06-15 14:07:05 -0500 | [diff] [blame] | 427 | |
Paolo Bonzini | 9fb2629 | 2022-09-22 10:49:02 +0200 | [diff] [blame] | 428 | void nbd_co_establish_connection_cancel(NBDClientConnection *conn); |
Vladimir Sementsov-Ogievskiy | 5276c87 | 2021-06-15 14:07:05 -0500 | [diff] [blame] | 429 | |
bellard | 7a5ca86 | 2008-05-27 21:13:40 +0000 | [diff] [blame] | 430 | #endif |