/*
 * Copyright (C) 2016-2020 Red Hat, Inc.
 * Copyright (C) 2005 Anthony Liguori <anthony@codemonkey.ws>
 *
 * Network Block Device
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; under version 2 of the License.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, see <http://www.gnu.org/licenses/>.
 */

#ifndef NBD_H
#define NBD_H

#include "block/export.h"
#include "io/channel-socket.h"
#include "crypto/tlscreds.h"
#include "qapi/error.h"

extern const BlockExportDriver blk_exp_nbd;

/* Handshake phase structs - these structs are passed on the wire */

struct NBDOption {
    uint64_t magic; /* NBD_OPTS_MAGIC */
    uint32_t option; /* NBD_OPT_* */
    uint32_t length;
} QEMU_PACKED;
typedef struct NBDOption NBDOption;

struct NBDOptionReply {
    uint64_t magic; /* NBD_REP_MAGIC */
    uint32_t option; /* NBD_OPT_* */
    uint32_t type; /* NBD_REP_* */
    uint32_t length;
} QEMU_PACKED;
typedef struct NBDOptionReply NBDOptionReply;

typedef struct NBDOptionReplyMetaContext {
    NBDOptionReply h; /* h.type = NBD_REP_META_CONTEXT, h.length > 4 */
    uint32_t context_id;
    /* metadata context name follows */
} QEMU_PACKED NBDOptionReplyMetaContext;
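
/*
 * A minimal sketch (not part of this header's API) of how a client could
 * serialize an option request with NBDOption.  NBD_OPTS_MAGIC and the
 * byte-order helpers are defined elsewhere in QEMU; payload_len is a
 * hypothetical local:
 *
 *     NBDOption opt = {
 *         .magic = cpu_to_be64(NBD_OPTS_MAGIC),
 *         .option = cpu_to_be32(NBD_OPT_GO),
 *         .length = cpu_to_be32(payload_len),
 *     };
 *     qio_channel_write_all(ioc, (char *)&opt, sizeof(opt), errp);
 */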

/* Transmission phase structs
 *
 * Note: these are _NOT_ the same as the network representation of an NBD
 * request and reply!
 */
struct NBDRequest {
    uint64_t handle;
    uint64_t from;
    uint32_t len;
    uint16_t flags; /* NBD_CMD_FLAG_* */
    uint16_t type; /* NBD_CMD_* */
};
typedef struct NBDRequest NBDRequest;
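
/*
 * For illustration only: a client read of @length bytes at @offset
 * (hypothetical locals) would be described in host byte order as below;
 * conversion to the wire format happens in nbd_send_request():
 *
 *     NBDRequest request = {
 *         .handle = handle,
 *         .from = offset,
 *         .len = length,
 *         .type = NBD_CMD_READ,
 *     };
 */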

typedef struct NBDSimpleReply {
    uint32_t magic;  /* NBD_SIMPLE_REPLY_MAGIC */
    uint32_t error;
    uint64_t handle;
} QEMU_PACKED NBDSimpleReply;

/* Header of all structured replies */
typedef struct NBDStructuredReplyChunk {
    uint32_t magic;  /* NBD_STRUCTURED_REPLY_MAGIC */
    uint16_t flags;  /* combination of NBD_REPLY_FLAG_* */
    uint16_t type;   /* NBD_REPLY_TYPE_* */
    uint64_t handle; /* request handle */
    uint32_t length; /* length of payload */
} QEMU_PACKED NBDStructuredReplyChunk;

typedef union NBDReply {
    NBDSimpleReply simple;
    NBDStructuredReplyChunk structured;
    struct {
        /* @magic and @handle fields have the same offset and size in both
         * the simple reply and the structured reply chunk, so they are
         * accessible without the ".simple." or ".structured." prefix.
         */
        uint32_t magic;
        uint32_t _skip;
        uint64_t handle;
    } QEMU_PACKED;
} NBDReply;
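
/*
 * Thanks to the anonymous struct above, code that has not yet decided
 * which kind of reply it holds can still check, for example:
 *
 *     if (reply.magic == NBD_SIMPLE_REPLY_MAGIC) { ... }
 *
 * before interpreting the rest via .simple or .structured (see
 * nbd_reply_is_simple()/nbd_reply_is_structured() below).
 */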

/* Header of chunk for NBD_REPLY_TYPE_OFFSET_DATA */
typedef struct NBDStructuredReadData {
    NBDStructuredReplyChunk h; /* h.length >= 9 */
    uint64_t offset;
    /* At least one byte of data payload follows, calculated from h.length */
} QEMU_PACKED NBDStructuredReadData;

/* Complete chunk for NBD_REPLY_TYPE_OFFSET_HOLE */
typedef struct NBDStructuredReadHole {
    NBDStructuredReplyChunk h; /* h.length == 12 */
    uint64_t offset;
    uint32_t length;
} QEMU_PACKED NBDStructuredReadHole;

/* Header of all NBD_REPLY_TYPE_ERROR* errors */
typedef struct NBDStructuredError {
    NBDStructuredReplyChunk h; /* h.length >= 6 */
    uint32_t error;
    uint16_t message_length;
} QEMU_PACKED NBDStructuredError;

/* Header of NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDStructuredMeta {
    NBDStructuredReplyChunk h; /* h.length >= 12 (at least one extent) */
    uint32_t context_id;
    /* extents follow */
} QEMU_PACKED NBDStructuredMeta;

/* Extent chunk for NBD_REPLY_TYPE_BLOCK_STATUS */
typedef struct NBDExtent {
    uint32_t length;
    uint32_t flags; /* NBD_STATE_* */
} QEMU_PACKED NBDExtent;
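
/*
 * On the wire, a BLOCK_STATUS chunk is thus an NBDStructuredMeta header
 * followed by n >= 1 NBDExtent entries, giving
 * h.length == sizeof(uint32_t) + n * sizeof(NBDExtent).
 */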

/* Transmission (export) flags: sent from server to client during handshake,
 * but they describe what will happen during the transmission phase */
enum {
    NBD_FLAG_HAS_FLAGS_BIT          =  0, /* Flags are there */
    NBD_FLAG_READ_ONLY_BIT          =  1, /* Device is read-only */
    NBD_FLAG_SEND_FLUSH_BIT         =  2, /* Send FLUSH */
    NBD_FLAG_SEND_FUA_BIT           =  3, /* Send FUA (Force Unit Access) */
    NBD_FLAG_ROTATIONAL_BIT         =  4, /* Use elevator algorithm -
                                             rotational media */
    NBD_FLAG_SEND_TRIM_BIT          =  5, /* Send TRIM (discard) */
    NBD_FLAG_SEND_WRITE_ZEROES_BIT  =  6, /* Send WRITE_ZEROES */
    NBD_FLAG_SEND_DF_BIT            =  7, /* Send DF (Do not Fragment) */
    NBD_FLAG_CAN_MULTI_CONN_BIT     =  8, /* Multi-client cache consistent */
    NBD_FLAG_SEND_RESIZE_BIT        =  9, /* Send resize */
    NBD_FLAG_SEND_CACHE_BIT         = 10, /* Send CACHE (prefetch) */
    NBD_FLAG_SEND_FAST_ZERO_BIT     = 11, /* FAST_ZERO flag for WRITE_ZEROES */
};

#define NBD_FLAG_HAS_FLAGS         (1 << NBD_FLAG_HAS_FLAGS_BIT)
#define NBD_FLAG_READ_ONLY         (1 << NBD_FLAG_READ_ONLY_BIT)
#define NBD_FLAG_SEND_FLUSH        (1 << NBD_FLAG_SEND_FLUSH_BIT)
#define NBD_FLAG_SEND_FUA          (1 << NBD_FLAG_SEND_FUA_BIT)
#define NBD_FLAG_ROTATIONAL        (1 << NBD_FLAG_ROTATIONAL_BIT)
#define NBD_FLAG_SEND_TRIM         (1 << NBD_FLAG_SEND_TRIM_BIT)
#define NBD_FLAG_SEND_WRITE_ZEROES (1 << NBD_FLAG_SEND_WRITE_ZEROES_BIT)
#define NBD_FLAG_SEND_DF           (1 << NBD_FLAG_SEND_DF_BIT)
#define NBD_FLAG_CAN_MULTI_CONN    (1 << NBD_FLAG_CAN_MULTI_CONN_BIT)
#define NBD_FLAG_SEND_RESIZE       (1 << NBD_FLAG_SEND_RESIZE_BIT)
#define NBD_FLAG_SEND_CACHE        (1 << NBD_FLAG_SEND_CACHE_BIT)
#define NBD_FLAG_SEND_FAST_ZERO    (1 << NBD_FLAG_SEND_FAST_ZERO_BIT)
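
/*
 * A client typically gates optional requests on these bits; as a sketch,
 * using the NBDExportInfo.flags field declared below:
 *
 *     bool can_trim = info->flags & NBD_FLAG_SEND_TRIM;
 *     bool can_flush = info->flags & NBD_FLAG_SEND_FLUSH;
 */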

/* New-style handshake (global) flags, sent from server to client; they
   control what will happen during the handshake phase. */
#define NBD_FLAG_FIXED_NEWSTYLE   (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_NO_ZEROES        (1 << 1) /* End handshake without zeroes. */

/* New-style client flags, sent from client to server to control what happens
   during handshake phase. */
#define NBD_FLAG_C_FIXED_NEWSTYLE (1 << 0) /* Fixed newstyle protocol. */
#define NBD_FLAG_C_NO_ZEROES      (1 << 1) /* End handshake without zeroes. */

/* Option requests. */
#define NBD_OPT_EXPORT_NAME       (1)
#define NBD_OPT_ABORT             (2)
#define NBD_OPT_LIST              (3)
/* #define NBD_OPT_PEEK_EXPORT    (4) not in use */
#define NBD_OPT_STARTTLS          (5)
#define NBD_OPT_INFO              (6)
#define NBD_OPT_GO                (7)
#define NBD_OPT_STRUCTURED_REPLY  (8)
#define NBD_OPT_LIST_META_CONTEXT (9)
#define NBD_OPT_SET_META_CONTEXT  (10)

/* Option reply types. */
#define NBD_REP_ERR(value) ((UINT32_C(1) << 31) | (value))
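
/*
 * The high bit distinguishes error replies from success replies; for
 * example, NBD_REP_ERR(1) == UINT32_C(0x80000001) (NBD_REP_ERR_UNSUP below).
 */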

#define NBD_REP_ACK             (1)    /* Data sending finished. */
#define NBD_REP_SERVER          (2)    /* Export description. */
#define NBD_REP_INFO            (3)    /* NBD_OPT_INFO/GO. */
#define NBD_REP_META_CONTEXT    (4)    /* NBD_OPT_{LIST,SET}_META_CONTEXT */

#define NBD_REP_ERR_UNSUP           NBD_REP_ERR(1)  /* Unknown option */
#define NBD_REP_ERR_POLICY          NBD_REP_ERR(2)  /* Server denied */
#define NBD_REP_ERR_INVALID         NBD_REP_ERR(3)  /* Invalid length */
#define NBD_REP_ERR_PLATFORM        NBD_REP_ERR(4)  /* Not compiled in */
#define NBD_REP_ERR_TLS_REQD        NBD_REP_ERR(5)  /* TLS required */
#define NBD_REP_ERR_UNKNOWN         NBD_REP_ERR(6)  /* Export unknown */
#define NBD_REP_ERR_SHUTDOWN        NBD_REP_ERR(7)  /* Server shutting down */
#define NBD_REP_ERR_BLOCK_SIZE_REQD NBD_REP_ERR(8)  /* Need INFO_BLOCK_SIZE */

/* Info types, used during NBD_REP_INFO */
#define NBD_INFO_EXPORT         0
#define NBD_INFO_NAME           1
#define NBD_INFO_DESCRIPTION    2
#define NBD_INFO_BLOCK_SIZE     3

/* Request flags, sent from client to server during transmission phase */
#define NBD_CMD_FLAG_FUA        (1 << 0) /* 'force unit access' during write */
#define NBD_CMD_FLAG_NO_HOLE    (1 << 1) /* don't punch hole on zero run */
#define NBD_CMD_FLAG_DF         (1 << 2) /* don't fragment structured read */
#define NBD_CMD_FLAG_REQ_ONE    (1 << 3) /* only one extent in BLOCK_STATUS
                                          * reply chunk */
#define NBD_CMD_FLAG_FAST_ZERO  (1 << 4) /* fail if WRITE_ZEROES is not fast */

/* Supported request types */
enum {
    NBD_CMD_READ = 0,
    NBD_CMD_WRITE = 1,
    NBD_CMD_DISC = 2,
    NBD_CMD_FLUSH = 3,
    NBD_CMD_TRIM = 4,
    NBD_CMD_CACHE = 5,
    NBD_CMD_WRITE_ZEROES = 6,
    NBD_CMD_BLOCK_STATUS = 7,
};

#define NBD_DEFAULT_PORT 10809

/* Maximum size of a single READ/WRITE data buffer */
#define NBD_MAX_BUFFER_SIZE (32 * 1024 * 1024)

/*
 * Maximum size of a protocol string (export name, metadata context name,
 * etc.).  Use malloc rather than stack allocation for storage of a
 * string.
 */
#define NBD_MAX_STRING_SIZE 4096

/* Two types of reply structures */
#define NBD_SIMPLE_REPLY_MAGIC      0x67446698
#define NBD_STRUCTURED_REPLY_MAGIC  0x668e33ef

/* Structured reply flags */
#define NBD_REPLY_FLAG_DONE      (1 << 0) /* This reply-chunk is last */

/* Structured reply types */
#define NBD_REPLY_ERR(value)         ((1 << 15) | (value))

#define NBD_REPLY_TYPE_NONE          0
#define NBD_REPLY_TYPE_OFFSET_DATA   1
#define NBD_REPLY_TYPE_OFFSET_HOLE   2
#define NBD_REPLY_TYPE_BLOCK_STATUS  5
#define NBD_REPLY_TYPE_ERROR         NBD_REPLY_ERR(1)
#define NBD_REPLY_TYPE_ERROR_OFFSET  NBD_REPLY_ERR(2)

/* Extent flags for base:allocation in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_HOLE (1 << 0)
#define NBD_STATE_ZERO (1 << 1)

/* Extent flags for qemu:dirty-bitmap in NBD_REPLY_TYPE_BLOCK_STATUS */
#define NBD_STATE_DIRTY (1 << 0)

/* No flags needed for qemu:allocation-depth in NBD_REPLY_TYPE_BLOCK_STATUS */

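/* True exactly for the values produced by the NBD_REPLY_ERR() macro above */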
static inline bool nbd_reply_type_is_error(int type)
{
    return type & (1 << 15);
}

/* NBD errors are based on errno numbers, so there is a 1:1 mapping,
 * but only a limited set of errno values is specified in the protocol.
 * Everything else is squashed to EINVAL.
 */
#define NBD_SUCCESS    0
#define NBD_EPERM      1
#define NBD_EIO        5
#define NBD_ENOMEM     12
#define NBD_EINVAL     22
#define NBD_ENOSPC     28
#define NBD_EOVERFLOW  75
#define NBD_ENOTSUP    95
#define NBD_ESHUTDOWN  108
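
/*
 * For example, a server that cannot honor NBD_CMD_FLAG_FAST_ZERO replies
 * with NBD_ENOTSUP, which nbd_errno_to_system_errno() (declared below)
 * maps back to the host's ENOTSUP.
 */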

/* Details collected by NBD_OPT_EXPORT_NAME and NBD_OPT_GO */
struct NBDExportInfo {
    /* Set by client before nbd_receive_negotiate() */
    bool request_sizes;
    char *x_dirty_bitmap;

    /* Set by client before nbd_receive_negotiate(), or by server results
     * during nbd_receive_export_list() */
    char *name; /* must be non-NULL */

    /* In-out fields, set by client before nbd_receive_negotiate() and
     * updated by server results during nbd_receive_negotiate() */
    bool structured_reply;
    bool base_allocation; /* base:allocation context for NBD_CMD_BLOCK_STATUS */

    /* Set by server results during nbd_receive_negotiate() and
     * nbd_receive_export_list() */
    uint64_t size;
    uint16_t flags;
    uint32_t min_block;
    uint32_t opt_block;
    uint32_t max_block;

    uint32_t context_id;

    /* Set by server results during nbd_receive_export_list() */
    char *description;
    int n_contexts;
    char **contexts;
};
typedef struct NBDExportInfo NBDExportInfo;

int nbd_receive_negotiate(AioContext *aio_context, QIOChannel *ioc,
                          QCryptoTLSCreds *tlscreds,
                          const char *hostname, QIOChannel **outioc,
                          NBDExportInfo *info, Error **errp);
void nbd_free_export_list(NBDExportInfo *info, int count);
int nbd_receive_export_list(QIOChannel *ioc, QCryptoTLSCreds *tlscreds,
                            const char *hostname, NBDExportInfo **info,
                            Error **errp);
int nbd_init(int fd, QIOChannelSocket *sioc, NBDExportInfo *info,
             Error **errp);
int nbd_send_request(QIOChannel *ioc, NBDRequest *request);
int coroutine_fn nbd_receive_reply(BlockDriverState *bs, QIOChannel *ioc,
                                   NBDReply *reply, Error **errp);
int nbd_client(int fd);
int nbd_disconnect(int fd);
int nbd_errno_to_system_errno(int err);

typedef struct NBDExport NBDExport;
typedef struct NBDClient NBDClient;

void nbd_export_set_on_eject_blk(BlockExport *exp, BlockBackend *blk);

AioContext *nbd_export_aio_context(NBDExport *exp);
NBDExport *nbd_export_find(const char *name);

void nbd_client_new(QIOChannelSocket *sioc,
                    QCryptoTLSCreds *tlscreds,
                    const char *tlsauthz,
                    void (*close_fn)(NBDClient *, bool));
void nbd_client_get(NBDClient *client);
void nbd_client_put(NBDClient *client);

void nbd_server_is_qemu_nbd(bool value);
bool nbd_server_is_running(void);
void nbd_server_start(SocketAddress *addr, const char *tls_creds,
                      const char *tls_authz, uint32_t max_connections,
                      Error **errp);
void nbd_server_start_options(NbdServerOptions *arg, Error **errp);

/* nbd_read
 * Reads @size bytes from @ioc. Returns 0 on success.
 */
static inline int nbd_read(QIOChannel *ioc, void *buffer, size_t size,
                           const char *desc, Error **errp)
{
    ERRP_GUARD();
    int ret = qio_channel_read_all(ioc, buffer, size, errp) < 0 ? -EIO : 0;

    if (ret < 0) {
        if (desc) {
            error_prepend(errp, "Failed to read %s: ", desc);
        }
        return ret;
    }

    return 0;
}

#define DEF_NBD_READ_N(bits)                                            \
static inline int nbd_read##bits(QIOChannel *ioc,                       \
                                 uint##bits##_t *val,                   \
                                 const char *desc, Error **errp)        \
{                                                                       \
    int ret = nbd_read(ioc, val, sizeof(*val), desc, errp);             \
    if (ret < 0) {                                                      \
        return ret;                                                     \
    }                                                                   \
    *val = be##bits##_to_cpu(*val);                                     \
    return 0;                                                           \
}

DEF_NBD_READ_N(16) /* Defines nbd_read16(). */
DEF_NBD_READ_N(32) /* Defines nbd_read32(). */
DEF_NBD_READ_N(64) /* Defines nbd_read64(). */

#undef DEF_NBD_READ_N
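
/*
 * Usage sketch (hypothetical caller): read a big-endian 32-bit length
 * field off the wire and get it back in host byte order:
 *
 *     uint32_t len;
 *     int ret = nbd_read32(ioc, &len, "option length", errp);
 *     if (ret < 0) {
 *         return ret;
 *     }
 */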

static inline bool nbd_reply_is_simple(NBDReply *reply)
{
    return reply->magic == NBD_SIMPLE_REPLY_MAGIC;
}

static inline bool nbd_reply_is_structured(NBDReply *reply)
{
    return reply->magic == NBD_STRUCTURED_REPLY_MAGIC;
}

const char *nbd_reply_type_lookup(uint16_t type);
const char *nbd_opt_lookup(uint32_t opt);
const char *nbd_rep_lookup(uint32_t rep);
const char *nbd_info_lookup(uint16_t info);
const char *nbd_cmd_lookup(uint16_t info);
const char *nbd_err_lookup(int err);

/* nbd/client-connection.c */
typedef struct NBDClientConnection NBDClientConnection;

void nbd_client_connection_enable_retry(NBDClientConnection *conn);

NBDClientConnection *nbd_client_connection_new(const SocketAddress *saddr,
                                               bool do_negotiation,
                                               const char *export_name,
                                               const char *x_dirty_bitmap,
                                               QCryptoTLSCreds *tlscreds,
                                               const char *tlshostname);
void nbd_client_connection_release(NBDClientConnection *conn);

QIOChannel *coroutine_fn
nbd_co_establish_connection(NBDClientConnection *conn, NBDExportInfo *info,
                            bool blocking, Error **errp);

void coroutine_fn nbd_co_establish_connection_cancel(NBDClientConnection *conn);
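
/*
 * Typical lifecycle (sketch; the locals are illustrative):
 *
 *     conn = nbd_client_connection_new(saddr, true, export_name,
 *                                      NULL, tlscreds, tlshostname);
 *     ioc = nbd_co_establish_connection(conn, &info, true, errp);
 *     ... transmission phase over ioc ...
 *     nbd_client_connection_release(conn);
 */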

#endif