blob: b05512718c0ae5c0bb7919ef7ab2cb12eba4a708 [file] [log] [blame]
bellard585f8582006-08-05 21:14:20 +00001/*
2 * Block driver for the QCOW version 2 format
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard585f8582006-08-05 21:14:20 +00004 * Copyright (c) 2004-2006 Fabrice Bellard
ths5fafdf22007-09-16 21:08:06 +00005 *
bellard585f8582006-08-05 21:14:20 +00006 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
Markus Armbrustere688df62018-02-01 12:18:31 +010024
Peter Maydell80c71a22016-01-18 18:01:42 +000025#include "qemu/osdep.h"
Vladimir Sementsov-Ogievskiy2714f132018-06-20 17:48:36 +030026
Max Reitz609f45e2018-06-14 21:14:28 +020027#include "block/qdict.h"
Kevin Wolf23588792016-03-08 15:57:05 +010028#include "sysemu/block-backend.h"
Markus Armbrusterdb725812019-08-12 07:23:50 +020029#include "qemu/main-loop.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010030#include "qemu/module.h"
Michael S. Tsirkin0d8c41d2018-05-03 22:50:20 +030031#include "qcow2.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010032#include "qemu/error-report.h"
Markus Armbrustere688df62018-02-01 12:18:31 +010033#include "qapi/error.h"
Markus Armbruster9af23982018-02-11 10:36:01 +010034#include "qapi/qapi-events-block-core.h"
Markus Armbruster6b673952018-02-01 12:18:35 +010035#include "qapi/qmp/qdict.h"
36#include "qapi/qmp/qstring.h"
Kevin Wolf3cce16f2012-03-01 18:36:21 +010037#include "trace.h"
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +080038#include "qemu/option_int.h"
Veronia Bahaaf348b6d2016-03-20 19:16:19 +020039#include "qemu/cutils.h"
Paolo Bonzini58369e22016-03-15 17:22:36 +010040#include "qemu/bswap.h"
Kevin Wolfb76b4f62018-01-11 16:18:08 +010041#include "qapi/qobject-input-visitor.h"
42#include "qapi/qapi-visit-block-core.h"
Michael S. Tsirkin0d8c41d2018-05-03 22:50:20 +030043#include "crypto.h"
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +030044#include "block/aio_task.h"
bellard585f8582006-08-05 21:14:20 +000045
46/*
47 Differences with QCOW:
48
49 - Support for multiple incremental snapshots.
50 - Memory management by reference counts.
51 - Clusters which have a reference count of one have the bit
52 QCOW_OFLAG_COPIED to optimize write performance.
ths5fafdf22007-09-16 21:08:06 +000053 - Size of compressed clusters is stored in sectors to reduce bit usage
bellard585f8582006-08-05 21:14:20 +000054 in the cluster offsets.
55 - Support for storing additional data (such as the VM state) in the
ths3b46e622007-09-17 08:09:54 +000056 snapshots.
bellard585f8582006-08-05 21:14:20 +000057 - If a backing store is used, the cluster size is not constrained
58 (could be backported to QCOW).
59 - L2 tables have always a size of one cluster.
60*/
61
aliguori9b80ddf2009-03-28 17:55:06 +000062
63typedef struct {
64 uint32_t magic;
65 uint32_t len;
Jeff Codyc4217f62013-09-25 12:08:50 -040066} QEMU_PACKED QCowExtension;
Jeff Cody21d82ac2012-09-20 15:13:28 -040067
Jes Sorensen7c80ab32010-12-17 16:02:39 +010068#define QCOW2_EXT_MAGIC_END 0
Andrey Shinkevich80989692020-07-17 11:14:49 +030069#define QCOW2_EXT_MAGIC_BACKING_FORMAT 0xe2792aca
Kevin Wolfcfcc4c62012-04-12 15:20:27 +020070#define QCOW2_EXT_MAGIC_FEATURE_TABLE 0x6803f857
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +010071#define QCOW2_EXT_MAGIC_CRYPTO_HEADER 0x0537be77
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +030072#define QCOW2_EXT_MAGIC_BITMAPS 0x23852875
Kevin Wolf93c24932019-01-14 16:48:25 +010073#define QCOW2_EXT_MAGIC_DATA_FILE 0x44415441
aliguori9b80ddf2009-03-28 17:55:06 +000074
/*
 * Forward declaration for the compressed-cluster read helper; the
 * definition is further down in this file (not in this chunk).
 */
static int coroutine_fn
qcow2_co_preadv_compressed(BlockDriverState *bs,
                           uint64_t cluster_descriptor,
                           uint64_t offset,
                           uint64_t bytes,
                           QEMUIOVector *qiov,
                           size_t qiov_offset);
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +030082
Jes Sorensen7c80ab32010-12-17 16:02:39 +010083static int qcow2_probe(const uint8_t *buf, int buf_size, const char *filename)
bellard585f8582006-08-05 21:14:20 +000084{
85 const QCowHeader *cow_header = (const void *)buf;
ths3b46e622007-09-17 08:09:54 +000086
bellard585f8582006-08-05 21:14:20 +000087 if (buf_size >= sizeof(QCowHeader) &&
88 be32_to_cpu(cow_header->magic) == QCOW_MAGIC &&
Kevin Wolf6744cba2011-12-15 12:20:58 +010089 be32_to_cpu(cow_header->version) >= 2)
bellard585f8582006-08-05 21:14:20 +000090 return 100;
91 else
92 return 0;
93}
94
aliguori9b80ddf2009-03-28 17:55:06 +000095
/*
 * QCryptoBlock read callback: fetch @buflen bytes at @offset from the
 * encryption (LUKS) header area embedded in the qcow2 file.
 *
 * Returns the (non-negative) bdrv_pread result on success, or -1 with
 * @errp set on failure.
 */
static ssize_t qcow2_crypto_hdr_read_func(QCryptoBlock *block, size_t offset,
                                          uint8_t *buf, size_t buflen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    ssize_t ret;

    /* Never read past the area recorded for the crypto header. */
    if ((offset + buflen) > s->crypto_header.length) {
        error_setg(errp, "Request for data outside of extension header");
        return -1;
    }

    /* The header lives at a fixed offset inside the image file. */
    ret = bdrv_pread(bs->file,
                     s->crypto_header.offset + offset, buf, buflen);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not read encryption header");
        return -1;
    }
    return ret;
}
117
118
/*
 * QCryptoBlock init callback: reserve cluster-aligned space in the image
 * for a @headerlen-byte encryption (LUKS) header and record its location
 * in s->crypto_header.
 *
 * Returns a non-negative value on success, or -1 with @errp set.
 */
static ssize_t qcow2_crypto_hdr_init_func(QCryptoBlock *block, size_t headerlen,
                                          void *opaque, Error **errp)
{
    BlockDriverState *bs = opaque;
    BDRVQcow2State *s = bs->opaque;
    int64_t ret;
    int64_t clusterlen;

    ret = qcow2_alloc_clusters(bs, headerlen);
    if (ret < 0) {
        error_setg_errno(errp, -ret,
                         "Cannot allocate cluster for LUKS header size %zu",
                         headerlen);
        return -1;
    }

    /* Remember where the header lives so the read/write callbacks work. */
    s->crypto_header.length = headerlen;
    s->crypto_header.offset = ret;

    /*
     * Zero fill all space in cluster so it has predictable
     * content, as we may not initialize some regions of the
     * header (eg only 1 out of 8 key slots will be initialized)
     */
    clusterlen = size_to_clusters(s, headerlen) * s->cluster_size;
    assert(qcow2_pre_write_overlap_check(bs, 0, ret, clusterlen, false) == 0);
    ret = bdrv_pwrite_zeroes(bs->file,
                             ret,
                             clusterlen, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not zero fill encryption header");
        return -1;
    }

    return ret;
}
155
156
157static ssize_t qcow2_crypto_hdr_write_func(QCryptoBlock *block, size_t offset,
158 const uint8_t *buf, size_t buflen,
159 void *opaque, Error **errp)
160{
161 BlockDriverState *bs = opaque;
162 BDRVQcow2State *s = bs->opaque;
163 ssize_t ret;
164
165 if ((offset + buflen) > s->crypto_header.length) {
166 error_setg(errp, "Request for data outside of extension header");
167 return -1;
168 }
169
170 ret = bdrv_pwrite(bs->file,
171 s->crypto_header.offset + offset, buf, buflen);
172 if (ret < 0) {
173 error_setg_errno(errp, -ret, "Could not read encryption header");
174 return -1;
175 }
176 return ret;
177}
178
/*
 * Build a QDict of crypto options from the "encrypt.*" members of @opts.
 * The "encrypt." prefix is stripped from extracted keys and a "format"
 * entry naming @fmt is added.  The caller owns the returned QDict.
 */
static QDict*
qcow2_extract_crypto_opts(QemuOpts *opts, const char *fmt, Error **errp)
{
    QDict *cryptoopts_qdict;
    QDict *opts_qdict;

    /* Extract "encrypt." options into a qdict */
    opts_qdict = qemu_opts_to_qdict(opts, NULL);
    qdict_extract_subqdict(opts_qdict, &cryptoopts_qdict, "encrypt.");
    qobject_unref(opts_qdict);
    qdict_put_str(cryptoopts_qdict, "format", fmt);
    return cryptoopts_qdict;
}
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +0100192
Eric Blakea951a632020-03-24 12:42:30 -0500193/*
aliguori9b80ddf2009-03-28 17:55:06 +0000194 * read qcow2 extension and fill bs
195 * start reading from start_offset
196 * finish reading upon magic of value 0 or when end_offset reached
197 * unknown magic is skipped (future extension this version knows nothing about)
198 * return 0 upon success, non-0 otherwise
199 */
Jes Sorensen7c80ab32010-12-17 16:02:39 +0100200static int qcow2_read_extensions(BlockDriverState *bs, uint64_t start_offset,
Max Reitz3ef6c402013-09-05 09:40:43 +0200201 uint64_t end_offset, void **p_feature_table,
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +0300202 int flags, bool *need_update_header,
203 Error **errp)
aliguori9b80ddf2009-03-28 17:55:06 +0000204{
Kevin Wolfff991292015-09-07 17:12:56 +0200205 BDRVQcow2State *s = bs->opaque;
aliguori9b80ddf2009-03-28 17:55:06 +0000206 QCowExtension ext;
207 uint64_t offset;
Kevin Wolf75bab852012-02-02 14:52:08 +0100208 int ret;
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +0300209 Qcow2BitmapHeaderExt bitmaps_ext;
210
211 if (need_update_header != NULL) {
212 *need_update_header = false;
213 }
aliguori9b80ddf2009-03-28 17:55:06 +0000214
215#ifdef DEBUG_EXT
Jes Sorensen7c80ab32010-12-17 16:02:39 +0100216 printf("qcow2_read_extensions: start=%ld end=%ld\n", start_offset, end_offset);
aliguori9b80ddf2009-03-28 17:55:06 +0000217#endif
218 offset = start_offset;
219 while (offset < end_offset) {
220
221#ifdef DEBUG_EXT
222 /* Sanity check */
223 if (offset > s->cluster_size)
Jes Sorensen7c80ab32010-12-17 16:02:39 +0100224 printf("qcow2_read_extension: suspicious offset %lu\n", offset);
aliguori9b80ddf2009-03-28 17:55:06 +0000225
Dong Xu Wang9b2260c2011-11-22 18:06:25 +0800226 printf("attempting to read extended header in offset %lu\n", offset);
aliguori9b80ddf2009-03-28 17:55:06 +0000227#endif
228
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +0200229 ret = bdrv_pread(bs->file, offset, &ext, sizeof(ext));
Max Reitz3ef6c402013-09-05 09:40:43 +0200230 if (ret < 0) {
231 error_setg_errno(errp, -ret, "qcow2_read_extension: ERROR: "
232 "pread fail from offset %" PRIu64, offset);
aliguori9b80ddf2009-03-28 17:55:06 +0000233 return 1;
234 }
Peter Maydell3b698f52018-10-09 18:24:59 +0100235 ext.magic = be32_to_cpu(ext.magic);
236 ext.len = be32_to_cpu(ext.len);
aliguori9b80ddf2009-03-28 17:55:06 +0000237 offset += sizeof(ext);
238#ifdef DEBUG_EXT
239 printf("ext.magic = 0x%x\n", ext.magic);
240#endif
Kevin Wolf2ebafc82014-11-25 18:12:40 +0100241 if (offset > end_offset || ext.len > end_offset - offset) {
Max Reitz3ef6c402013-09-05 09:40:43 +0200242 error_setg(errp, "Header extension too large");
Kevin Wolf64ca6ae2012-02-22 12:37:13 +0100243 return -EINVAL;
244 }
245
aliguori9b80ddf2009-03-28 17:55:06 +0000246 switch (ext.magic) {
Jes Sorensen7c80ab32010-12-17 16:02:39 +0100247 case QCOW2_EXT_MAGIC_END:
aliguori9b80ddf2009-03-28 17:55:06 +0000248 return 0;
aliguorif9655092009-03-28 17:55:14 +0000249
Jes Sorensen7c80ab32010-12-17 16:02:39 +0100250 case QCOW2_EXT_MAGIC_BACKING_FORMAT:
aliguorif9655092009-03-28 17:55:14 +0000251 if (ext.len >= sizeof(bs->backing_format)) {
Max Reitz521b2b52014-04-29 19:03:12 +0200252 error_setg(errp, "ERROR: ext_backing_format: len=%" PRIu32
253 " too large (>=%zu)", ext.len,
254 sizeof(bs->backing_format));
aliguorif9655092009-03-28 17:55:14 +0000255 return 2;
256 }
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +0200257 ret = bdrv_pread(bs->file, offset, bs->backing_format, ext.len);
Max Reitz3ef6c402013-09-05 09:40:43 +0200258 if (ret < 0) {
259 error_setg_errno(errp, -ret, "ERROR: ext_backing_format: "
260 "Could not read format name");
aliguorif9655092009-03-28 17:55:14 +0000261 return 3;
Max Reitz3ef6c402013-09-05 09:40:43 +0200262 }
aliguorif9655092009-03-28 17:55:14 +0000263 bs->backing_format[ext.len] = '\0';
Kevin Wolfe4603fe2015-04-07 15:03:16 +0200264 s->image_backing_format = g_strdup(bs->backing_format);
aliguorif9655092009-03-28 17:55:14 +0000265#ifdef DEBUG_EXT
266 printf("Qcow2: Got format extension %s\n", bs->backing_format);
267#endif
aliguorif9655092009-03-28 17:55:14 +0000268 break;
269
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200270 case QCOW2_EXT_MAGIC_FEATURE_TABLE:
271 if (p_feature_table != NULL) {
272 void* feature_table = g_malloc0(ext.len + 2 * sizeof(Qcow2Feature));
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +0200273 ret = bdrv_pread(bs->file, offset , feature_table, ext.len);
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200274 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +0200275 error_setg_errno(errp, -ret, "ERROR: ext_feature_table: "
276 "Could not read table");
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200277 return ret;
278 }
279
280 *p_feature_table = feature_table;
281 }
282 break;
283
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +0100284 case QCOW2_EXT_MAGIC_CRYPTO_HEADER: {
285 unsigned int cflags = 0;
286 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
287 error_setg(errp, "CRYPTO header extension only "
288 "expected with LUKS encryption method");
289 return -EINVAL;
290 }
291 if (ext.len != sizeof(Qcow2CryptoHeaderExtension)) {
292 error_setg(errp, "CRYPTO header extension size %u, "
293 "but expected size %zu", ext.len,
294 sizeof(Qcow2CryptoHeaderExtension));
295 return -EINVAL;
296 }
297
298 ret = bdrv_pread(bs->file, offset, &s->crypto_header, ext.len);
299 if (ret < 0) {
300 error_setg_errno(errp, -ret,
301 "Unable to read CRYPTO header extension");
302 return ret;
303 }
Peter Maydell3b698f52018-10-09 18:24:59 +0100304 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
305 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +0100306
307 if ((s->crypto_header.offset % s->cluster_size) != 0) {
308 error_setg(errp, "Encryption header offset '%" PRIu64 "' is "
309 "not a multiple of cluster size '%u'",
310 s->crypto_header.offset, s->cluster_size);
311 return -EINVAL;
312 }
313
314 if (flags & BDRV_O_NO_IO) {
315 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
316 }
Daniel P. Berrange1cd9a782017-06-23 17:24:17 +0100317 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +0100318 qcow2_crypto_hdr_read_func,
Vladimir Sementsov-Ogievskiy8ac0f152019-05-06 17:27:41 +0300319 bs, cflags, QCOW2_MAX_THREADS, errp);
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +0100320 if (!s->crypto) {
321 return -EINVAL;
322 }
323 } break;
324
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +0300325 case QCOW2_EXT_MAGIC_BITMAPS:
326 if (ext.len != sizeof(bitmaps_ext)) {
327 error_setg_errno(errp, -ret, "bitmaps_ext: "
328 "Invalid extension length");
329 return -EINVAL;
330 }
331
332 if (!(s->autoclear_features & QCOW2_AUTOCLEAR_BITMAPS)) {
Max Reitzc9ceb3e2017-11-23 03:08:17 +0100333 if (s->qcow_version < 3) {
334 /* Let's be a bit more specific */
335 warn_report("This qcow2 v2 image contains bitmaps, but "
336 "they may have been modified by a program "
337 "without persistent bitmap support; so now "
338 "they must all be considered inconsistent");
339 } else {
340 warn_report("a program lacking bitmap support "
341 "modified this file, so all bitmaps are now "
342 "considered inconsistent");
343 }
Alistair Francis55d527a2017-09-11 12:52:46 -0700344 error_printf("Some clusters may be leaked, "
345 "run 'qemu-img check -r' on the image "
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +0300346 "file to fix.");
347 if (need_update_header != NULL) {
348 /* Updating is needed to drop invalid bitmap extension. */
349 *need_update_header = true;
350 }
351 break;
352 }
353
354 ret = bdrv_pread(bs->file, offset, &bitmaps_ext, ext.len);
355 if (ret < 0) {
356 error_setg_errno(errp, -ret, "bitmaps_ext: "
357 "Could not read ext header");
358 return ret;
359 }
360
361 if (bitmaps_ext.reserved32 != 0) {
362 error_setg_errno(errp, -ret, "bitmaps_ext: "
363 "Reserved field is not zero");
364 return -EINVAL;
365 }
366
Peter Maydell3b698f52018-10-09 18:24:59 +0100367 bitmaps_ext.nb_bitmaps = be32_to_cpu(bitmaps_ext.nb_bitmaps);
368 bitmaps_ext.bitmap_directory_size =
369 be64_to_cpu(bitmaps_ext.bitmap_directory_size);
370 bitmaps_ext.bitmap_directory_offset =
371 be64_to_cpu(bitmaps_ext.bitmap_directory_offset);
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +0300372
373 if (bitmaps_ext.nb_bitmaps > QCOW2_MAX_BITMAPS) {
374 error_setg(errp,
375 "bitmaps_ext: Image has %" PRIu32 " bitmaps, "
376 "exceeding the QEMU supported maximum of %d",
377 bitmaps_ext.nb_bitmaps, QCOW2_MAX_BITMAPS);
378 return -EINVAL;
379 }
380
381 if (bitmaps_ext.nb_bitmaps == 0) {
382 error_setg(errp, "found bitmaps extension with zero bitmaps");
383 return -EINVAL;
384 }
385
Alberto Garcia74e60fb2019-12-12 12:01:21 +0200386 if (offset_into_cluster(s, bitmaps_ext.bitmap_directory_offset)) {
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +0300387 error_setg(errp, "bitmaps_ext: "
388 "invalid bitmap directory offset");
389 return -EINVAL;
390 }
391
392 if (bitmaps_ext.bitmap_directory_size >
393 QCOW2_MAX_BITMAP_DIRECTORY_SIZE) {
394 error_setg(errp, "bitmaps_ext: "
395 "bitmap directory size (%" PRIu64 ") exceeds "
396 "the maximum supported size (%d)",
397 bitmaps_ext.bitmap_directory_size,
398 QCOW2_MAX_BITMAP_DIRECTORY_SIZE);
399 return -EINVAL;
400 }
401
402 s->nb_bitmaps = bitmaps_ext.nb_bitmaps;
403 s->bitmap_directory_offset =
404 bitmaps_ext.bitmap_directory_offset;
405 s->bitmap_directory_size =
406 bitmaps_ext.bitmap_directory_size;
407
408#ifdef DEBUG_EXT
409 printf("Qcow2: Got bitmaps extension: "
410 "offset=%" PRIu64 " nb_bitmaps=%" PRIu32 "\n",
411 s->bitmap_directory_offset, s->nb_bitmaps);
412#endif
413 break;
414
Kevin Wolf9b890bd2019-01-15 19:02:40 +0100415 case QCOW2_EXT_MAGIC_DATA_FILE:
416 {
417 s->image_data_file = g_malloc0(ext.len + 1);
418 ret = bdrv_pread(bs->file, offset, s->image_data_file, ext.len);
419 if (ret < 0) {
420 error_setg_errno(errp, -ret,
421 "ERROR: Could not read data file name");
422 return ret;
423 }
424#ifdef DEBUG_EXT
425 printf("Qcow2: Got external data file %s\n", s->image_data_file);
426#endif
427 break;
428 }
429
aliguori9b80ddf2009-03-28 17:55:06 +0000430 default:
Kevin Wolf75bab852012-02-02 14:52:08 +0100431 /* unknown magic - save it in case we need to rewrite the header */
Eric Blake40969742017-11-17 10:47:47 -0600432 /* If you add a new feature, make sure to also update the fast
433 * path of qcow2_make_empty() to deal with it. */
Kevin Wolf75bab852012-02-02 14:52:08 +0100434 {
435 Qcow2UnknownHeaderExtension *uext;
436
437 uext = g_malloc0(sizeof(*uext) + ext.len);
438 uext->magic = ext.magic;
439 uext->len = ext.len;
440 QLIST_INSERT_HEAD(&s->unknown_header_ext, uext, next);
441
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +0200442 ret = bdrv_pread(bs->file, offset , uext->data, uext->len);
Kevin Wolf75bab852012-02-02 14:52:08 +0100443 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +0200444 error_setg_errno(errp, -ret, "ERROR: unknown extension: "
445 "Could not read data");
Kevin Wolf75bab852012-02-02 14:52:08 +0100446 return ret;
447 }
Kevin Wolf75bab852012-02-02 14:52:08 +0100448 }
aliguori9b80ddf2009-03-28 17:55:06 +0000449 break;
450 }
Kevin Wolffd29b4b2012-02-22 12:31:47 +0100451
452 offset += ((ext.len + 7) & ~7);
aliguori9b80ddf2009-03-28 17:55:06 +0000453 }
454
455 return 0;
456}
457
Kevin Wolf75bab852012-02-02 14:52:08 +0100458static void cleanup_unknown_header_ext(BlockDriverState *bs)
459{
Kevin Wolfff991292015-09-07 17:12:56 +0200460 BDRVQcow2State *s = bs->opaque;
Kevin Wolf75bab852012-02-02 14:52:08 +0100461 Qcow2UnknownHeaderExtension *uext, *next;
462
463 QLIST_FOREACH_SAFE(uext, &s->unknown_header_ext, next, next) {
464 QLIST_REMOVE(uext, next);
465 g_free(uext);
466 }
467}
aliguori9b80ddf2009-03-28 17:55:06 +0000468
Max Reitza55448b2016-03-16 19:54:33 +0100469static void report_unsupported_feature(Error **errp, Qcow2Feature *table,
470 uint64_t mask)
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200471{
Alberto Garcia7cdca2e2020-01-15 14:56:26 +0100472 g_autoptr(GString) features = g_string_sized_new(60);
Kevin Wolf12ac6d32014-07-17 11:41:53 +0200473
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200474 while (table && table->name[0] != '\0') {
475 if (table->type == QCOW2_FEAT_TYPE_INCOMPATIBLE) {
Kevin Wolf12ac6d32014-07-17 11:41:53 +0200476 if (mask & (1ULL << table->bit)) {
Alberto Garcia7cdca2e2020-01-15 14:56:26 +0100477 if (features->len > 0) {
478 g_string_append(features, ", ");
479 }
480 g_string_append_printf(features, "%.46s", table->name);
Kevin Wolf12ac6d32014-07-17 11:41:53 +0200481 mask &= ~(1ULL << table->bit);
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200482 }
483 }
484 table++;
485 }
486
487 if (mask) {
Alberto Garcia7cdca2e2020-01-15 14:56:26 +0100488 if (features->len > 0) {
489 g_string_append(features, ", ");
490 }
491 g_string_append_printf(features,
492 "Unknown incompatible feature: %" PRIx64, mask);
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200493 }
Kevin Wolf12ac6d32014-07-17 11:41:53 +0200494
Alberto Garcia7cdca2e2020-01-15 14:56:26 +0100495 error_setg(errp, "Unsupported qcow2 feature(s): %s", features->str);
Kevin Wolfcfcc4c62012-04-12 15:20:27 +0200496}
497
/*
 * Sets the dirty bit and flushes afterwards if necessary.
 *
 * The incompatible_features bit is only set if the image file header was
 * updated successfully. Therefore it is not required to check the return
 * value of this function.
 */
int qcow2_mark_dirty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t val;
    int ret;

    /* The dirty flag only exists in the v3 format. */
    assert(s->qcow_version >= 3);

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        return 0; /* already dirty */
    }

    /* Write the updated feature bits straight into the on-disk header. */
    val = cpu_to_be64(s->incompatible_features | QCOW2_INCOMPAT_DIRTY);
    ret = bdrv_pwrite(bs->file, offsetof(QCowHeader, incompatible_features),
                      &val, sizeof(val));
    if (ret < 0) {
        return ret;
    }
    /* Make sure the dirty bit reaches stable storage before any
     * subsequent metadata updates rely on it. */
    ret = bdrv_flush(bs->file->bs);
    if (ret < 0) {
        return ret;
    }

    /* Only treat image as dirty if the header was updated successfully */
    s->incompatible_features |= QCOW2_INCOMPAT_DIRTY;
    return 0;
}
532
/*
 * Clears the dirty bit and flushes before if necessary. Only call this
 * function when there are no pending requests, it does not guard against
 * concurrent requests dirtying the image.
 */
static int qcow2_mark_clean(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;

    if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
        int ret;

        s->incompatible_features &= ~QCOW2_INCOMPAT_DIRTY;

        /* All cached metadata must be on disk before the header can
         * legitimately claim the image is clean. */
        ret = qcow2_flush_caches(bs);
        if (ret < 0) {
            return ret;
        }

        return qcow2_update_header(bs);
    }
    return 0;
}
556
Max Reitz69c98722013-08-30 14:34:24 +0200557/*
558 * Marks the image as corrupt.
559 */
560int qcow2_mark_corrupt(BlockDriverState *bs)
561{
Kevin Wolfff991292015-09-07 17:12:56 +0200562 BDRVQcow2State *s = bs->opaque;
Max Reitz69c98722013-08-30 14:34:24 +0200563
564 s->incompatible_features |= QCOW2_INCOMPAT_CORRUPT;
565 return qcow2_update_header(bs);
566}
567
568/*
569 * Marks the image as consistent, i.e., unsets the corrupt bit, and flushes
570 * before if necessary.
571 */
572int qcow2_mark_consistent(BlockDriverState *bs)
573{
Kevin Wolfff991292015-09-07 17:12:56 +0200574 BDRVQcow2State *s = bs->opaque;
Max Reitz69c98722013-08-30 14:34:24 +0200575
576 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
Paolo Bonzini8b220eb2018-03-01 17:36:14 +0100577 int ret = qcow2_flush_caches(bs);
Max Reitz69c98722013-08-30 14:34:24 +0200578 if (ret < 0) {
579 return ret;
580 }
581
582 s->incompatible_features &= ~QCOW2_INCOMPAT_CORRUPT;
583 return qcow2_update_header(bs);
584 }
585 return 0;
586}
587
Max Reitz8bc584f2019-10-11 17:28:06 +0200588static void qcow2_add_check_result(BdrvCheckResult *out,
589 const BdrvCheckResult *src,
590 bool set_allocation_info)
591{
592 out->corruptions += src->corruptions;
593 out->leaks += src->leaks;
594 out->check_errors += src->check_errors;
595 out->corruptions_fixed += src->corruptions_fixed;
596 out->leaks_fixed += src->leaks_fixed;
597
598 if (set_allocation_info) {
599 out->image_end_offset = src->image_end_offset;
600 out->bfi = src->bfi;
601 }
602}
603
/*
 * Run the full consistency check (snapshot table + refcounts) with the
 * driver lock already held, accumulating all partial results into
 * @result.  If @fix requested repairs and everything came out clean,
 * also clear the dirty/corrupt header bits.
 */
static int coroutine_fn qcow2_co_check_locked(BlockDriverState *bs,
                                              BdrvCheckResult *result,
                                              BdrvCheckMode fix)
{
    BdrvCheckResult snapshot_res = {};
    BdrvCheckResult refcount_res = {};
    int ret;

    memset(result, 0, sizeof(*result));

    /* Phase 1: validate/repair the snapshot table itself. */
    ret = qcow2_check_read_snapshot_table(bs, &snapshot_res, fix);
    if (ret < 0) {
        /* Even on failure, surface the counters gathered so far. */
        qcow2_add_check_result(result, &snapshot_res, false);
        return ret;
    }

    /* Phase 2: refcount check; this one provides the allocation info. */
    ret = qcow2_check_refcounts(bs, &refcount_res, fix);
    qcow2_add_check_result(result, &refcount_res, true);
    if (ret < 0) {
        qcow2_add_check_result(result, &snapshot_res, false);
        return ret;
    }

    /* Phase 3: snapshot fixes that depend on correct refcounts. */
    ret = qcow2_check_fix_snapshot_table(bs, &snapshot_res, fix);
    qcow2_add_check_result(result, &snapshot_res, false);
    if (ret < 0) {
        return ret;
    }

    if (fix && result->check_errors == 0 && result->corruptions == 0) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            return ret;
        }
        return qcow2_mark_consistent(bs);
    }
    return ret;
}
642
Paolo Bonzini2fd61632018-03-01 17:36:19 +0100643static int coroutine_fn qcow2_co_check(BlockDriverState *bs,
644 BdrvCheckResult *result,
645 BdrvCheckMode fix)
646{
647 BDRVQcow2State *s = bs->opaque;
648 int ret;
649
650 qemu_co_mutex_lock(&s->lock);
651 ret = qcow2_co_check_locked(bs, result, fix);
652 qemu_co_mutex_unlock(&s->lock);
653 return ret;
654}
655
/*
 * Validate an on-disk table described by a header field: @entries entries
 * of @entry_len bytes each, starting at @offset.  Fails if the table is
 * larger than @max_size_bytes, if its end would exceed INT64_MAX, or if
 * @offset is not cluster-aligned.  @table_name is used in error messages.
 *
 * Returns 0 on success, -EFBIG/-EINVAL with @errp set otherwise.
 */
int qcow2_validate_table(BlockDriverState *bs, uint64_t offset,
                         uint64_t entries, size_t entry_len,
                         int64_t max_size_bytes, const char *table_name,
                         Error **errp)
{
    BDRVQcow2State *s = bs->opaque;

    /* This bound also guarantees entries * entry_len below cannot
     * overflow, so the order of these two checks matters. */
    if (entries > max_size_bytes / entry_len) {
        error_setg(errp, "%s too large", table_name);
        return -EFBIG;
    }

    /* Use signed INT64_MAX as the maximum even for uint64_t header fields,
     * because values will be passed to qemu functions taking int64_t. */
    if ((INT64_MAX - entries * entry_len < offset) ||
        (offset_into_cluster(s, offset) != 0)) {
        error_setg(errp, "%s offset invalid", table_name);
        return -EINVAL;
    }

    return 0;
}
678
/*
 * NULL-terminated list of runtime options whose values may be changed on
 * an already-open image (the name suggests use by the reopen path —
 * NOTE(review): confirm against the callers, which are outside this chunk).
 */
static const char *const mutable_opts[] = {
    QCOW2_OPT_LAZY_REFCOUNTS,
    QCOW2_OPT_DISCARD_REQUEST,
    QCOW2_OPT_DISCARD_SNAPSHOT,
    QCOW2_OPT_DISCARD_OTHER,
    QCOW2_OPT_OVERLAP,
    QCOW2_OPT_OVERLAP_TEMPLATE,
    QCOW2_OPT_OVERLAP_MAIN_HEADER,
    QCOW2_OPT_OVERLAP_ACTIVE_L1,
    QCOW2_OPT_OVERLAP_ACTIVE_L2,
    QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    QCOW2_OPT_OVERLAP_INACTIVE_L1,
    QCOW2_OPT_OVERLAP_INACTIVE_L2,
    QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
    QCOW2_OPT_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_SIZE,
    QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
    QCOW2_OPT_REFCOUNT_CACHE_SIZE,
    QCOW2_OPT_CACHE_CLEAN_INTERVAL,
    NULL
};
702
/* Runtime (open-time) options accepted by the qcow2 block driver. */
static QemuOptsList qcow2_runtime_opts = {
    .name = "qcow2",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_runtime_opts.head),
    .desc = {
        {
            .name = QCOW2_OPT_LAZY_REFCOUNTS,
            .type = QEMU_OPT_BOOL,
            .help = "Postpone refcount updates",
        },
        /* Discard pass-through policy options. */
        {
            .name = QCOW2_OPT_DISCARD_REQUEST,
            .type = QEMU_OPT_BOOL,
            .help = "Pass guest discard requests to the layer below",
        },
        {
            .name = QCOW2_OPT_DISCARD_SNAPSHOT,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when snapshot related space "
                    "is freed",
        },
        {
            .name = QCOW2_OPT_DISCARD_OTHER,
            .type = QEMU_OPT_BOOL,
            .help = "Generate discard requests when other clusters are freed",
        },
        /* Metadata-overlap check options: the two template selectors
         * below are aliases of each other, hence the identical help. */
        {
            .name = QCOW2_OPT_OVERLAP,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_TEMPLATE,
            .type = QEMU_OPT_STRING,
            .help = "Selects which overlap checks to perform from a range of "
                    "templates (none, constant, cached, all)",
        },
        {
            .name = QCOW2_OPT_OVERLAP_MAIN_HEADER,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the main qcow2 header",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the active L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_ACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an active L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the refcount table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into a refcount block",
        },
        {
            .name = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the snapshot table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L1,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L1 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_INACTIVE_L2,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into an inactive L2 table",
        },
        {
            .name = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
            .type = QEMU_OPT_BOOL,
            .help = "Check for unintended writes into the bitmap directory",
        },
        /* Metadata cache sizing options. */
        {
            .name = QCOW2_OPT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum combined metadata (L2 tables and refcount blocks) "
                    "cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum L2 table cache size",
        },
        {
            .name = QCOW2_OPT_L2_CACHE_ENTRY_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Size of each entry in the L2 cache",
        },
        {
            .name = QCOW2_OPT_REFCOUNT_CACHE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Maximum refcount block cache size",
        },
        {
            .name = QCOW2_OPT_CACHE_CLEAN_INTERVAL,
            .type = QEMU_OPT_NUMBER,
            .help = "Clean unused cache entries after this time (in seconds)",
        },
        /* Encryption secret reference (AES key or LUKS passphrase). */
        BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.",
            "ID of secret providing qcow2 AES key or LUKS passphrase"),
        { /* end of list */ }
    },
};
816
/*
 * Maps each metadata overlap check bit number (QCOW2_OL_*_BITNR) to the
 * name of the boolean runtime option that enables or disables that
 * individual check.  qcow2_update_options_prepare() iterates over this
 * table to let each per-check option override the template bitmask
 * selected via the "overlap-check" option.
 */
static const char *overlap_bool_option_names[QCOW2_OL_MAX_BITNR] = {
    [QCOW2_OL_MAIN_HEADER_BITNR]      = QCOW2_OPT_OVERLAP_MAIN_HEADER,
    [QCOW2_OL_ACTIVE_L1_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L1,
    [QCOW2_OL_ACTIVE_L2_BITNR]        = QCOW2_OPT_OVERLAP_ACTIVE_L2,
    [QCOW2_OL_REFCOUNT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_TABLE,
    [QCOW2_OL_REFCOUNT_BLOCK_BITNR]   = QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK,
    [QCOW2_OL_SNAPSHOT_TABLE_BITNR]   = QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE,
    [QCOW2_OL_INACTIVE_L1_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L1,
    [QCOW2_OL_INACTIVE_L2_BITNR]      = QCOW2_OPT_OVERLAP_INACTIVE_L2,
    [QCOW2_OL_BITMAP_DIRECTORY_BITNR] = QCOW2_OPT_OVERLAP_BITMAP_DIRECTORY,
};
828
Alberto Garcia279621c2015-08-04 15:14:40 +0300829static void cache_clean_timer_cb(void *opaque)
830{
831 BlockDriverState *bs = opaque;
Kevin Wolfff991292015-09-07 17:12:56 +0200832 BDRVQcow2State *s = bs->opaque;
Alberto Garciab2f68bf2018-02-05 16:33:09 +0200833 qcow2_cache_clean_unused(s->l2_table_cache);
834 qcow2_cache_clean_unused(s->refcount_block_cache);
Alberto Garcia279621c2015-08-04 15:14:40 +0300835 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
836 (int64_t) s->cache_clean_interval * 1000);
837}
838
839static void cache_clean_timer_init(BlockDriverState *bs, AioContext *context)
840{
Kevin Wolfff991292015-09-07 17:12:56 +0200841 BDRVQcow2State *s = bs->opaque;
Alberto Garcia279621c2015-08-04 15:14:40 +0300842 if (s->cache_clean_interval > 0) {
843 s->cache_clean_timer = aio_timer_new(context, QEMU_CLOCK_VIRTUAL,
844 SCALE_MS, cache_clean_timer_cb,
845 bs);
846 timer_mod(s->cache_clean_timer, qemu_clock_get_ms(QEMU_CLOCK_VIRTUAL) +
847 (int64_t) s->cache_clean_interval * 1000);
848 }
849}
850
851static void cache_clean_timer_del(BlockDriverState *bs)
852{
Kevin Wolfff991292015-09-07 17:12:56 +0200853 BDRVQcow2State *s = bs->opaque;
Alberto Garcia279621c2015-08-04 15:14:40 +0300854 if (s->cache_clean_timer) {
855 timer_del(s->cache_clean_timer);
856 timer_free(s->cache_clean_timer);
857 s->cache_clean_timer = NULL;
858 }
859}
860
/* Stop the cache-clean timer before the BDS leaves its current AioContext. */
static void qcow2_detach_aio_context(BlockDriverState *bs)
{
    cache_clean_timer_del(bs);
}
865
/* Re-create the cache-clean timer in the AioContext the BDS moved to. */
static void qcow2_attach_aio_context(BlockDriverState *bs,
                                     AioContext *new_context)
{
    cache_clean_timer_init(bs, new_context);
}
871
/*
 * Read the metadata cache sizing options from @opts and derive the sizes
 * of the L2 table cache and the refcount block cache.
 *
 * All three output sizes are in bytes; the caller converts them into
 * entry counts and enforces the minimum values.  The user may give a
 * combined "cache-size", individual "l2-cache-size" /
 * "refcount-cache-size" values, or a mix; conflicting combinations are
 * rejected via @errp (output values are then unspecified).
 */
static void read_cache_sizes(BlockDriverState *bs, QemuOpts *opts,
                             uint64_t *l2_cache_size,
                             uint64_t *l2_cache_entry_size,
                             uint64_t *refcount_cache_size, Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t combined_cache_size, l2_cache_max_setting;
    bool l2_cache_size_set, refcount_cache_size_set, combined_cache_size_set;
    bool l2_cache_entry_size_set;
    int min_refcount_cache = MIN_REFCOUNT_CACHE_SIZE * s->cluster_size;
    uint64_t virtual_disk_size = bs->total_sectors * BDRV_SECTOR_SIZE;
    /* Number of L2 entries needed to map the entire virtual disk */
    uint64_t max_l2_entries = DIV_ROUND_UP(virtual_disk_size, s->cluster_size);
    /* An L2 table is always one cluster in size so the max cache size
     * should be a multiple of the cluster size. */
    uint64_t max_l2_cache = ROUND_UP(max_l2_entries * l2_entry_size(s),
                                     s->cluster_size);

    /* Track which options the user gave explicitly (vs. defaults) */
    combined_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_CACHE_SIZE);
    l2_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_SIZE);
    refcount_cache_size_set = qemu_opt_get(opts, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
    l2_cache_entry_size_set = qemu_opt_get(opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE);

    combined_cache_size = qemu_opt_get_size(opts, QCOW2_OPT_CACHE_SIZE, 0);
    l2_cache_max_setting = qemu_opt_get_size(opts, QCOW2_OPT_L2_CACHE_SIZE,
                                             DEFAULT_L2_CACHE_MAX_SIZE);
    *refcount_cache_size = qemu_opt_get_size(opts,
                                             QCOW2_OPT_REFCOUNT_CACHE_SIZE, 0);

    *l2_cache_entry_size = qemu_opt_get_size(
        opts, QCOW2_OPT_L2_CACHE_ENTRY_SIZE, s->cluster_size);

    /* Never cache more L2 data than needed to cover the whole disk */
    *l2_cache_size = MIN(max_l2_cache, l2_cache_max_setting);

    if (combined_cache_size_set) {
        /* All three sizes at once is over-determined and rejected */
        if (l2_cache_size_set && refcount_cache_size_set) {
            error_setg(errp, QCOW2_OPT_CACHE_SIZE ", " QCOW2_OPT_L2_CACHE_SIZE
                       " and " QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not be set "
                       "at the same time");
            return;
        } else if (l2_cache_size_set &&
                   (l2_cache_max_setting > combined_cache_size)) {
            error_setg(errp, QCOW2_OPT_L2_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        } else if (*refcount_cache_size > combined_cache_size) {
            error_setg(errp, QCOW2_OPT_REFCOUNT_CACHE_SIZE " may not exceed "
                       QCOW2_OPT_CACHE_SIZE);
            return;
        }

        /* One cache size given: the other gets the remainder */
        if (l2_cache_size_set) {
            *refcount_cache_size = combined_cache_size - *l2_cache_size;
        } else if (refcount_cache_size_set) {
            *l2_cache_size = combined_cache_size - *refcount_cache_size;
        } else {
            /* Assign as much memory as possible to the L2 cache, and
             * use the remainder for the refcount cache */
            if (combined_cache_size >= max_l2_cache + min_refcount_cache) {
                *l2_cache_size = max_l2_cache;
                *refcount_cache_size = combined_cache_size - *l2_cache_size;
            } else {
                *refcount_cache_size =
                    MIN(combined_cache_size, min_refcount_cache);
                *l2_cache_size = combined_cache_size - *refcount_cache_size;
            }
        }
    }

    /*
     * If the L2 cache is not enough to cover the whole disk then
     * default to 4KB entries. Smaller entries reduce the cost of
     * loads and evictions and increase I/O performance.
     */
    if (*l2_cache_size < max_l2_cache && !l2_cache_entry_size_set) {
        *l2_cache_entry_size = MIN(s->cluster_size, 4096);
    }

    /* l2_cache_size and refcount_cache_size are ensured to have at least
     * their minimum values in qcow2_update_options_prepare() */

    if (*l2_cache_entry_size < (1 << MIN_CLUSTER_BITS) ||
        *l2_cache_entry_size > s->cluster_size ||
        !is_power_of_2(*l2_cache_entry_size)) {
        error_setg(errp, "L2 cache entry size must be a power of two "
                   "between %d and the cluster size (%d)",
                   1 << MIN_CLUSTER_BITS, s->cluster_size);
        return;
    }
}
961
/*
 * Runtime options parsed by qcow2_update_options_prepare(), held here
 * until they are either applied to the BDRVQcow2State by
 * qcow2_update_options_commit() or released by
 * qcow2_update_options_abort().
 */
typedef struct Qcow2ReopenState {
    Qcow2Cache *l2_table_cache;         /* freshly allocated L2 table cache */
    Qcow2Cache *refcount_block_cache;   /* freshly allocated refcount cache */
    int l2_slice_size; /* Number of entries in a slice of the L2 table */
    bool use_lazy_refcounts;
    int overlap_check;                  /* bitmask of QCOW2_OL_* checks */
    bool discard_passthrough[QCOW2_DISCARD_MAX];
    uint64_t cache_clean_interval;      /* in seconds; 0 disables the timer */
    QCryptoBlockOpenOptions *crypto_opts; /* Disk encryption runtime options */
} Qcow2ReopenState;
972
/*
 * Parse the runtime options in @options and validate them against the
 * current image state, filling in @r without touching @s.  On success the
 * caller applies @r via qcow2_update_options_commit(); on failure it must
 * release @r via qcow2_update_options_abort().
 *
 * Returns 0 on success, negative errno on failure (with @errp set).
 */
static int qcow2_update_options_prepare(BlockDriverState *bs,
                                        Qcow2ReopenState *r,
                                        QDict *options, int flags,
                                        Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QemuOpts *opts = NULL;
    const char *opt_overlap_check, *opt_overlap_check_template;
    int overlap_check_template = 0;
    uint64_t l2_cache_size, l2_cache_entry_size, refcount_cache_size;
    int i;
    const char *encryptfmt;
    QDict *encryptopts = NULL;
    Error *local_err = NULL;
    int ret;

    /* Pull all "encrypt.*" keys into their own dict for later parsing */
    qdict_extract_subqdict(options, &encryptopts, "encrypt.");
    encryptfmt = qdict_get_try_str(encryptopts, "format");

    opts = qemu_opts_create(&qcow2_runtime_opts, NULL, 0, &error_abort);
    if (!qemu_opts_absorb_qdict(opts, options, errp)) {
        ret = -EINVAL;
        goto fail;
    }

    /* get L2 table/refcount block cache size from command line options */
    read_cache_sizes(bs, opts, &l2_cache_size, &l2_cache_entry_size,
                     &refcount_cache_size, &local_err);
    if (local_err) {
        error_propagate(errp, local_err);
        ret = -EINVAL;
        goto fail;
    }

    /* Convert the byte size into an entry count and clamp it */
    l2_cache_size /= l2_cache_entry_size;
    if (l2_cache_size < MIN_L2_CACHE_SIZE) {
        l2_cache_size = MIN_L2_CACHE_SIZE;
    }
    if (l2_cache_size > INT_MAX) {
        error_setg(errp, "L2 cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    /* Refcount cache entries are one cluster each */
    refcount_cache_size /= s->cluster_size;
    if (refcount_cache_size < MIN_REFCOUNT_CACHE_SIZE) {
        refcount_cache_size = MIN_REFCOUNT_CACHE_SIZE;
    }
    if (refcount_cache_size > INT_MAX) {
        error_setg(errp, "Refcount cache size too big");
        ret = -EINVAL;
        goto fail;
    }

    /* alloc new L2 table/refcount block cache, flush old one */
    if (s->l2_table_cache) {
        ret = qcow2_cache_flush(bs, s->l2_table_cache);
        if (ret) {
            error_setg_errno(errp, -ret, "Failed to flush the L2 table cache");
            goto fail;
        }
    }

    if (s->refcount_block_cache) {
        ret = qcow2_cache_flush(bs, s->refcount_block_cache);
        if (ret) {
            error_setg_errno(errp, -ret,
                             "Failed to flush the refcount block cache");
            goto fail;
        }
    }

    r->l2_slice_size = l2_cache_entry_size / l2_entry_size(s);
    r->l2_table_cache = qcow2_cache_create(bs, l2_cache_size,
                                           l2_cache_entry_size);
    r->refcount_block_cache = qcow2_cache_create(bs, refcount_cache_size,
                                                 s->cluster_size);
    if (r->l2_table_cache == NULL || r->refcount_block_cache == NULL) {
        error_setg(errp, "Could not allocate metadata caches");
        ret = -ENOMEM;
        goto fail;
    }

    /* New interval for cache cleanup timer */
    r->cache_clean_interval =
        qemu_opt_get_number(opts, QCOW2_OPT_CACHE_CLEAN_INTERVAL,
                            DEFAULT_CACHE_CLEAN_INTERVAL);
#ifndef CONFIG_LINUX
    /* The cache-clean timer is only supported on Linux hosts */
    if (r->cache_clean_interval != 0) {
        error_setg(errp, QCOW2_OPT_CACHE_CLEAN_INTERVAL
                   " not supported on this host");
        ret = -EINVAL;
        goto fail;
    }
#endif
    if (r->cache_clean_interval > UINT_MAX) {
        error_setg(errp, "Cache clean interval too big");
        ret = -EINVAL;
        goto fail;
    }

    /* lazy-refcounts; flush if going from enabled to disabled */
    r->use_lazy_refcounts = qemu_opt_get_bool(opts, QCOW2_OPT_LAZY_REFCOUNTS,
        (s->compatible_features & QCOW2_COMPAT_LAZY_REFCOUNTS));
    if (r->use_lazy_refcounts && s->qcow_version < 3) {
        error_setg(errp, "Lazy refcounts require a qcow2 image with at least "
                   "qemu 1.1 compatibility level");
        ret = -EINVAL;
        goto fail;
    }

    /* Turning lazy refcounts off requires marking the image clean first */
    if (s->use_lazy_refcounts && !r->use_lazy_refcounts) {
        ret = qcow2_mark_clean(bs);
        if (ret < 0) {
            error_setg_errno(errp, -ret, "Failed to disable lazy refcounts");
            goto fail;
        }
    }

    /* Overlap check options */
    opt_overlap_check = qemu_opt_get(opts, QCOW2_OPT_OVERLAP);
    opt_overlap_check_template = qemu_opt_get(opts, QCOW2_OPT_OVERLAP_TEMPLATE);
    if (opt_overlap_check_template && opt_overlap_check &&
        strcmp(opt_overlap_check_template, opt_overlap_check))
    {
        error_setg(errp, "Conflicting values for qcow2 options '"
                   QCOW2_OPT_OVERLAP "' ('%s') and '" QCOW2_OPT_OVERLAP_TEMPLATE
                   "' ('%s')", opt_overlap_check, opt_overlap_check_template);
        ret = -EINVAL;
        goto fail;
    }
    if (!opt_overlap_check) {
        opt_overlap_check = opt_overlap_check_template ?: "cached";
    }

    /* Translate the template name into its QCOW2_OL_* bitmask */
    if (!strcmp(opt_overlap_check, "none")) {
        overlap_check_template = 0;
    } else if (!strcmp(opt_overlap_check, "constant")) {
        overlap_check_template = QCOW2_OL_CONSTANT;
    } else if (!strcmp(opt_overlap_check, "cached")) {
        overlap_check_template = QCOW2_OL_CACHED;
    } else if (!strcmp(opt_overlap_check, "all")) {
        overlap_check_template = QCOW2_OL_ALL;
    } else {
        error_setg(errp, "Unsupported value '%s' for qcow2 option "
                   "'overlap-check'. Allowed are any of the following: "
                   "none, constant, cached, all", opt_overlap_check);
        ret = -EINVAL;
        goto fail;
    }

    r->overlap_check = 0;
    for (i = 0; i < QCOW2_OL_MAX_BITNR; i++) {
        /* overlap-check defines a template bitmask, but every flag may be
         * overwritten through the associated boolean option */
        r->overlap_check |=
            qemu_opt_get_bool(opts, overlap_bool_option_names[i],
                              overlap_check_template & (1 << i)) << i;
    }

    r->discard_passthrough[QCOW2_DISCARD_NEVER] = false;
    r->discard_passthrough[QCOW2_DISCARD_ALWAYS] = true;
    r->discard_passthrough[QCOW2_DISCARD_REQUEST] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_REQUEST,
                          flags & BDRV_O_UNMAP);
    r->discard_passthrough[QCOW2_DISCARD_SNAPSHOT] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_SNAPSHOT, true);
    r->discard_passthrough[QCOW2_DISCARD_OTHER] =
        qemu_opt_get_bool(opts, QCOW2_OPT_DISCARD_OTHER, false);

    /*
     * Validate that any "encrypt.format" option matches the crypt method
     * recorded in the image header and build the crypto open options.
     */
    switch (s->crypt_method_header) {
    case QCOW_CRYPT_NONE:
        if (encryptfmt) {
            error_setg(errp, "No encryption in image header, but options "
                       "specified format '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        break;

    case QCOW_CRYPT_AES:
        if (encryptfmt && !g_str_equal(encryptfmt, "aes")) {
            error_setg(errp,
                       "Header reported 'aes' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "qcow");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        break;

    case QCOW_CRYPT_LUKS:
        if (encryptfmt && !g_str_equal(encryptfmt, "luks")) {
            error_setg(errp,
                       "Header reported 'luks' encryption format but "
                       "options specify '%s'", encryptfmt);
            ret = -EINVAL;
            goto fail;
        }
        qdict_put_str(encryptopts, "format", "luks");
        r->crypto_opts = block_crypto_open_opts_init(encryptopts, errp);
        break;

    default:
        error_setg(errp, "Unsupported encryption method %d",
                   s->crypt_method_header);
        break;
    }
    /* Covers both block_crypto_open_opts_init() failure and the default
     * case above (unsupported method leaves crypto_opts NULL) */
    if (s->crypt_method_header != QCOW_CRYPT_NONE && !r->crypto_opts) {
        ret = -EINVAL;
        goto fail;
    }

    ret = 0;
fail:
    /* Shared exit path: also reached on success with ret == 0 */
    qobject_unref(encryptopts);
    qemu_opts_del(opts);
    opts = NULL;
    return ret;
}
1194
/*
 * Apply a successfully prepared Qcow2ReopenState to the BDS.  The old
 * caches were already flushed in qcow2_update_options_prepare(), so
 * destroying them here cannot lose data; this function must not fail.
 */
static void qcow2_update_options_commit(BlockDriverState *bs,
                                        Qcow2ReopenState *r)
{
    BDRVQcow2State *s = bs->opaque;
    int i;

    /* Replace the old caches with the freshly allocated ones */
    if (s->l2_table_cache) {
        qcow2_cache_destroy(s->l2_table_cache);
    }
    if (s->refcount_block_cache) {
        qcow2_cache_destroy(s->refcount_block_cache);
    }
    s->l2_table_cache = r->l2_table_cache;
    s->refcount_block_cache = r->refcount_block_cache;
    s->l2_slice_size = r->l2_slice_size;

    s->overlap_check = r->overlap_check;
    s->use_lazy_refcounts = r->use_lazy_refcounts;

    for (i = 0; i < QCOW2_DISCARD_MAX; i++) {
        s->discard_passthrough[i] = r->discard_passthrough[i];
    }

    /* Restart the cache-clean timer only if the interval changed */
    if (s->cache_clean_interval != r->cache_clean_interval) {
        cache_clean_timer_del(bs);
        s->cache_clean_interval = r->cache_clean_interval;
        cache_clean_timer_init(bs, bdrv_get_aio_context(bs));
    }

    /* Old crypto options are superseded by the newly parsed ones */
    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
    s->crypto_opts = r->crypto_opts;
}
1227
1228static void qcow2_update_options_abort(BlockDriverState *bs,
1229 Qcow2ReopenState *r)
1230{
1231 if (r->l2_table_cache) {
Alberto Garciae64d4072018-02-05 16:33:08 +02001232 qcow2_cache_destroy(r->l2_table_cache);
Kevin Wolfee55b172015-04-16 16:16:02 +02001233 }
1234 if (r->refcount_block_cache) {
Alberto Garciae64d4072018-02-05 16:33:08 +02001235 qcow2_cache_destroy(r->refcount_block_cache);
Kevin Wolfee55b172015-04-16 16:16:02 +02001236 }
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01001237 qapi_free_QCryptoBlockOpenOptions(r->crypto_opts);
Kevin Wolfee55b172015-04-16 16:16:02 +02001238}
1239
1240static int qcow2_update_options(BlockDriverState *bs, QDict *options,
1241 int flags, Error **errp)
1242{
1243 Qcow2ReopenState r = {};
1244 int ret;
1245
1246 ret = qcow2_update_options_prepare(bs, &r, options, flags, errp);
1247 if (ret >= 0) {
1248 qcow2_update_options_commit(bs, &r);
1249 } else {
1250 qcow2_update_options_abort(bs, &r);
1251 }
Kevin Wolf94edf3f2015-04-16 11:44:26 +02001252
Kevin Wolf4c75d1a2015-04-16 11:29:27 +02001253 return ret;
1254}
1255
Denis Plotnikov572ad972020-05-07 11:25:18 +03001256static int validate_compression_type(BDRVQcow2State *s, Error **errp)
1257{
1258 switch (s->compression_type) {
1259 case QCOW2_COMPRESSION_TYPE_ZLIB:
Denis Plotnikovd298ac12020-05-07 11:25:20 +03001260#ifdef CONFIG_ZSTD
1261 case QCOW2_COMPRESSION_TYPE_ZSTD:
1262#endif
Denis Plotnikov572ad972020-05-07 11:25:18 +03001263 break;
1264
1265 default:
1266 error_setg(errp, "qcow2: unknown compression type: %u",
1267 s->compression_type);
1268 return -ENOTSUP;
1269 }
1270
1271 /*
1272 * if the compression type differs from QCOW2_COMPRESSION_TYPE_ZLIB
1273 * the incompatible feature flag must be set
1274 */
1275 if (s->compression_type == QCOW2_COMPRESSION_TYPE_ZLIB) {
1276 if (s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION) {
1277 error_setg(errp, "qcow2: Compression type incompatible feature "
1278 "bit must not be set");
1279 return -EINVAL;
1280 }
1281 } else {
1282 if (!(s->incompatible_features & QCOW2_INCOMPAT_COMPRESSION)) {
1283 error_setg(errp, "qcow2: Compression type incompatible feature "
1284 "bit must be set");
1285 return -EINVAL;
1286 }
1287 }
1288
1289 return 0;
1290}
1291
Paolo Bonzini1fafcd92018-03-01 17:36:16 +01001292/* Called with s->lock held. */
1293static int coroutine_fn qcow2_do_open(BlockDriverState *bs, QDict *options,
1294 int flags, Error **errp)
bellard585f8582006-08-05 21:14:20 +00001295{
Kevin Wolfff991292015-09-07 17:12:56 +02001296 BDRVQcow2State *s = bs->opaque;
Kevin Wolf6d33e8e2014-03-26 13:05:47 +01001297 unsigned int len, i;
1298 int ret = 0;
bellard585f8582006-08-05 21:14:20 +00001299 QCowHeader header;
Kevin Wolf74c45102013-03-15 10:35:08 +01001300 Error *local_err = NULL;
aliguori9b80ddf2009-03-28 17:55:06 +00001301 uint64_t ext_end;
Kevin Wolf2cf7cfa2013-05-14 16:14:33 +02001302 uint64_t l1_vm_state_index;
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +03001303 bool update_header = false;
bellard585f8582006-08-05 21:14:20 +00001304
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +02001305 ret = bdrv_pread(bs->file, 0, &header, sizeof(header));
Jes Sorensen6d85a572010-12-17 16:02:40 +01001306 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001307 error_setg_errno(errp, -ret, "Could not read qcow2 header");
bellard585f8582006-08-05 21:14:20 +00001308 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001309 }
Peter Maydell3b698f52018-10-09 18:24:59 +01001310 header.magic = be32_to_cpu(header.magic);
1311 header.version = be32_to_cpu(header.version);
1312 header.backing_file_offset = be64_to_cpu(header.backing_file_offset);
1313 header.backing_file_size = be32_to_cpu(header.backing_file_size);
1314 header.size = be64_to_cpu(header.size);
1315 header.cluster_bits = be32_to_cpu(header.cluster_bits);
1316 header.crypt_method = be32_to_cpu(header.crypt_method);
1317 header.l1_table_offset = be64_to_cpu(header.l1_table_offset);
1318 header.l1_size = be32_to_cpu(header.l1_size);
1319 header.refcount_table_offset = be64_to_cpu(header.refcount_table_offset);
1320 header.refcount_table_clusters =
1321 be32_to_cpu(header.refcount_table_clusters);
1322 header.snapshots_offset = be64_to_cpu(header.snapshots_offset);
1323 header.nb_snapshots = be32_to_cpu(header.nb_snapshots);
ths3b46e622007-09-17 08:09:54 +00001324
Kevin Wolfe8cdcec2011-02-09 11:11:07 +01001325 if (header.magic != QCOW_MAGIC) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001326 error_setg(errp, "Image is not in qcow2 format");
Paolo Bonzini76abe402014-02-17 14:44:06 +01001327 ret = -EINVAL;
bellard585f8582006-08-05 21:14:20 +00001328 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001329 }
Kevin Wolf6744cba2011-12-15 12:20:58 +01001330 if (header.version < 2 || header.version > 3) {
Max Reitza55448b2016-03-16 19:54:33 +01001331 error_setg(errp, "Unsupported qcow2 version %" PRIu32, header.version);
Kevin Wolfe8cdcec2011-02-09 11:11:07 +01001332 ret = -ENOTSUP;
1333 goto fail;
1334 }
Kevin Wolf6744cba2011-12-15 12:20:58 +01001335
1336 s->qcow_version = header.version;
1337
Kevin Wolf24342f22014-03-26 13:05:41 +01001338 /* Initialise cluster size */
1339 if (header.cluster_bits < MIN_CLUSTER_BITS ||
1340 header.cluster_bits > MAX_CLUSTER_BITS) {
Max Reitz521b2b52014-04-29 19:03:12 +02001341 error_setg(errp, "Unsupported cluster size: 2^%" PRIu32,
1342 header.cluster_bits);
Kevin Wolf24342f22014-03-26 13:05:41 +01001343 ret = -EINVAL;
1344 goto fail;
1345 }
1346
1347 s->cluster_bits = header.cluster_bits;
1348 s->cluster_size = 1 << s->cluster_bits;
Kevin Wolf24342f22014-03-26 13:05:41 +01001349
Kevin Wolf6744cba2011-12-15 12:20:58 +01001350 /* Initialise version 3 header fields */
1351 if (header.version == 2) {
1352 header.incompatible_features = 0;
1353 header.compatible_features = 0;
1354 header.autoclear_features = 0;
1355 header.refcount_order = 4;
1356 header.header_length = 72;
1357 } else {
Peter Maydell3b698f52018-10-09 18:24:59 +01001358 header.incompatible_features =
1359 be64_to_cpu(header.incompatible_features);
1360 header.compatible_features = be64_to_cpu(header.compatible_features);
1361 header.autoclear_features = be64_to_cpu(header.autoclear_features);
1362 header.refcount_order = be32_to_cpu(header.refcount_order);
1363 header.header_length = be32_to_cpu(header.header_length);
Kevin Wolf24342f22014-03-26 13:05:41 +01001364
1365 if (header.header_length < 104) {
1366 error_setg(errp, "qcow2 header too short");
1367 ret = -EINVAL;
1368 goto fail;
1369 }
1370 }
1371
1372 if (header.header_length > s->cluster_size) {
1373 error_setg(errp, "qcow2 header exceeds cluster size");
1374 ret = -EINVAL;
1375 goto fail;
Kevin Wolf6744cba2011-12-15 12:20:58 +01001376 }
1377
1378 if (header.header_length > sizeof(header)) {
1379 s->unknown_header_fields_size = header.header_length - sizeof(header);
1380 s->unknown_header_fields = g_malloc(s->unknown_header_fields_size);
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +02001381 ret = bdrv_pread(bs->file, sizeof(header), s->unknown_header_fields,
Kevin Wolf6744cba2011-12-15 12:20:58 +01001382 s->unknown_header_fields_size);
1383 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001384 error_setg_errno(errp, -ret, "Could not read unknown qcow2 header "
1385 "fields");
Kevin Wolf6744cba2011-12-15 12:20:58 +01001386 goto fail;
1387 }
1388 }
1389
Kevin Wolfa1b3955c2014-03-26 13:05:42 +01001390 if (header.backing_file_offset > s->cluster_size) {
1391 error_setg(errp, "Invalid backing file offset");
1392 ret = -EINVAL;
1393 goto fail;
1394 }
1395
Kevin Wolfcfcc4c62012-04-12 15:20:27 +02001396 if (header.backing_file_offset) {
1397 ext_end = header.backing_file_offset;
1398 } else {
1399 ext_end = 1 << header.cluster_bits;
1400 }
1401
Kevin Wolf6744cba2011-12-15 12:20:58 +01001402 /* Handle feature bits */
1403 s->incompatible_features = header.incompatible_features;
1404 s->compatible_features = header.compatible_features;
1405 s->autoclear_features = header.autoclear_features;
1406
Denis Plotnikov572ad972020-05-07 11:25:18 +03001407 /*
1408 * Handle compression type
1409 * Older qcow2 images don't contain the compression type header.
1410 * Distinguish them by the header length and use
1411 * the only valid (default) compression type in that case
1412 */
1413 if (header.header_length > offsetof(QCowHeader, compression_type)) {
1414 s->compression_type = header.compression_type;
1415 } else {
1416 s->compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
1417 }
1418
1419 ret = validate_compression_type(s, errp);
1420 if (ret) {
1421 goto fail;
1422 }
1423
Stefan Hajnoczic61d0002012-07-27 09:05:19 +01001424 if (s->incompatible_features & ~QCOW2_INCOMPAT_MASK) {
Kevin Wolfcfcc4c62012-04-12 15:20:27 +02001425 void *feature_table = NULL;
1426 qcow2_read_extensions(bs, header.header_length, ext_end,
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +03001427 &feature_table, flags, NULL, NULL);
Max Reitza55448b2016-03-16 19:54:33 +01001428 report_unsupported_feature(errp, feature_table,
Stefan Hajnoczic61d0002012-07-27 09:05:19 +01001429 s->incompatible_features &
1430 ~QCOW2_INCOMPAT_MASK);
Kevin Wolf6744cba2011-12-15 12:20:58 +01001431 ret = -ENOTSUP;
Prasad Joshic5a33ee2014-03-28 23:08:58 +05301432 g_free(feature_table);
Kevin Wolf6744cba2011-12-15 12:20:58 +01001433 goto fail;
1434 }
1435
Max Reitz69c98722013-08-30 14:34:24 +02001436 if (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT) {
1437 /* Corrupt images may not be written to unless they are being repaired
1438 */
1439 if ((flags & BDRV_O_RDWR) && !(flags & BDRV_O_CHECK)) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001440 error_setg(errp, "qcow2: Image is corrupt; cannot be opened "
1441 "read/write");
Max Reitz69c98722013-08-30 14:34:24 +02001442 ret = -EACCES;
1443 goto fail;
1444 }
1445 }
1446
Alberto Garciad0346b52020-07-10 18:12:51 +02001447 s->subclusters_per_cluster =
1448 has_subclusters(s) ? QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER : 1;
1449 s->subcluster_size = s->cluster_size / s->subclusters_per_cluster;
1450 s->subcluster_bits = ctz32(s->subcluster_size);
1451
Alberto Garcia7be20252020-07-10 18:13:13 +02001452 if (s->subcluster_size < (1 << MIN_CLUSTER_BITS)) {
1453 error_setg(errp, "Unsupported subcluster size: %d", s->subcluster_size);
1454 ret = -EINVAL;
1455 goto fail;
1456 }
1457
Kevin Wolf6744cba2011-12-15 12:20:58 +01001458 /* Check support for various header values */
Max Reitzb72faf92015-02-10 15:28:52 -05001459 if (header.refcount_order > 6) {
1460 error_setg(errp, "Reference count entry width too large; may not "
1461 "exceed 64 bits");
1462 ret = -EINVAL;
Kevin Wolf6744cba2011-12-15 12:20:58 +01001463 goto fail;
1464 }
Max Reitzb6481f32013-09-03 10:09:53 +02001465 s->refcount_order = header.refcount_order;
Max Reitz346a53d2015-02-10 15:28:43 -05001466 s->refcount_bits = 1 << s->refcount_order;
1467 s->refcount_max = UINT64_C(1) << (s->refcount_bits - 1);
1468 s->refcount_max += s->refcount_max - 1;
Kevin Wolf6744cba2011-12-15 12:20:58 +01001469
bellard585f8582006-08-05 21:14:20 +00001470 s->crypt_method_header = header.crypt_method;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001471 if (s->crypt_method_header) {
Daniel P. Berrangee6ff69b2016-03-21 14:11:48 +00001472 if (bdrv_uses_whitelist() &&
1473 s->crypt_method_header == QCOW_CRYPT_AES) {
Daniel P. Berrange8c0dcbc2016-06-13 12:30:09 +01001474 error_setg(errp,
1475 "Use of AES-CBC encrypted qcow2 images is no longer "
1476 "supported in system emulators");
1477 error_append_hint(errp,
1478 "You can use 'qemu-img convert' to convert your "
1479 "image to an alternative supported format, such "
1480 "as unencrypted qcow2, or raw with the LUKS "
1481 "format instead.\n");
1482 ret = -ENOSYS;
1483 goto fail;
Daniel P. Berrangee6ff69b2016-03-21 14:11:48 +00001484 }
1485
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01001486 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1487 s->crypt_physical_offset = false;
1488 } else {
1489 /* Assuming LUKS and any future crypt methods we
1490 * add will all use physical offsets, due to the
1491 * fact that the alternative is insecure... */
1492 s->crypt_physical_offset = true;
1493 }
1494
Eric Blake54115412016-06-23 16:37:26 -06001495 bs->encrypted = true;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001496 }
Kevin Wolf24342f22014-03-26 13:05:41 +01001497
Alberto Garciac8fd8552020-07-10 18:12:54 +02001498 s->l2_bits = s->cluster_bits - ctz32(l2_entry_size(s));
bellard585f8582006-08-05 21:14:20 +00001499 s->l2_size = 1 << s->l2_bits;
Max Reitz1d13d652014-10-22 14:09:28 +02001500 /* 2^(s->refcount_order - 3) is the refcount width in bytes */
1501 s->refcount_block_bits = s->cluster_bits - (s->refcount_order - 3);
1502 s->refcount_block_size = 1 << s->refcount_block_bits;
Leonid Blochbd016b92018-09-26 19:04:47 +03001503 bs->total_sectors = header.size / BDRV_SECTOR_SIZE;
bellard585f8582006-08-05 21:14:20 +00001504 s->csize_shift = (62 - (s->cluster_bits - 8));
1505 s->csize_mask = (1 << (s->cluster_bits - 8)) - 1;
1506 s->cluster_offset_mask = (1LL << s->csize_shift) - 1;
Kevin Wolf5dab2fa2014-03-26 13:05:43 +01001507
bellard585f8582006-08-05 21:14:20 +00001508 s->refcount_table_offset = header.refcount_table_offset;
ths5fafdf22007-09-16 21:08:06 +00001509 s->refcount_table_size =
bellard585f8582006-08-05 21:14:20 +00001510 header.refcount_table_clusters << (s->cluster_bits - 3);
1511
Alberto Garcia951053a2017-11-03 16:18:53 +02001512 if (header.refcount_table_clusters == 0 && !(flags & BDRV_O_CHECK)) {
1513 error_setg(errp, "Image does not contain a reference count table");
1514 ret = -EINVAL;
1515 goto fail;
1516 }
1517
Alberto Garcia0cf0e592018-03-06 18:14:06 +02001518 ret = qcow2_validate_table(bs, s->refcount_table_offset,
1519 header.refcount_table_clusters,
1520 s->cluster_size, QCOW_MAX_REFTABLE_SIZE,
1521 "Reference count table", errp);
Kevin Wolf8c7de282014-03-26 13:05:44 +01001522 if (ret < 0) {
Kevin Wolf8c7de282014-03-26 13:05:44 +01001523 goto fail;
1524 }
1525
Max Reitz8bc584f2019-10-11 17:28:06 +02001526 if (!(flags & BDRV_O_CHECK)) {
1527 /*
1528 * The total size in bytes of the snapshot table is checked in
1529 * qcow2_read_snapshots() because the size of each snapshot is
1530 * variable and we don't know it yet.
1531 * Here we only check the offset and number of snapshots.
1532 */
1533 ret = qcow2_validate_table(bs, header.snapshots_offset,
1534 header.nb_snapshots,
1535 sizeof(QCowSnapshotHeader),
1536 sizeof(QCowSnapshotHeader) *
1537 QCOW_MAX_SNAPSHOTS,
1538 "Snapshot table", errp);
1539 if (ret < 0) {
1540 goto fail;
1541 }
Kevin Wolfce48f2f42014-03-26 13:05:45 +01001542 }
1543
bellard585f8582006-08-05 21:14:20 +00001544 /* read the level 1 table */
Alberto Garcia0cf0e592018-03-06 18:14:06 +02001545 ret = qcow2_validate_table(bs, header.l1_table_offset,
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02001546 header.l1_size, L1E_SIZE,
Alberto Garcia0cf0e592018-03-06 18:14:06 +02001547 QCOW_MAX_L1_SIZE, "Active L1 table", errp);
1548 if (ret < 0) {
Kevin Wolf2d51c322014-03-26 13:05:46 +01001549 goto fail;
1550 }
bellard585f8582006-08-05 21:14:20 +00001551 s->l1_size = header.l1_size;
Alberto Garcia0cf0e592018-03-06 18:14:06 +02001552 s->l1_table_offset = header.l1_table_offset;
Kevin Wolf2cf7cfa2013-05-14 16:14:33 +02001553
1554 l1_vm_state_index = size_to_l1(s, header.size);
1555 if (l1_vm_state_index > INT_MAX) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001556 error_setg(errp, "Image is too big");
Kevin Wolf2cf7cfa2013-05-14 16:14:33 +02001557 ret = -EFBIG;
1558 goto fail;
1559 }
1560 s->l1_vm_state_index = l1_vm_state_index;
1561
bellard585f8582006-08-05 21:14:20 +00001562 /* the L1 table must contain at least enough entries to put
1563 header.size bytes */
Jes Sorensen6d85a572010-12-17 16:02:40 +01001564 if (s->l1_size < s->l1_vm_state_index) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001565 error_setg(errp, "L1 table is too small");
Jes Sorensen6d85a572010-12-17 16:02:40 +01001566 ret = -EINVAL;
bellard585f8582006-08-05 21:14:20 +00001567 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001568 }
Kevin Wolf2d51c322014-03-26 13:05:46 +01001569
Stefan Weild191d122009-10-26 16:11:16 +01001570 if (s->l1_size > 0) {
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02001571 s->l1_table = qemu_try_blockalign(bs->file->bs, s->l1_size * L1E_SIZE);
Kevin Wolfde828152014-05-20 17:12:47 +02001572 if (s->l1_table == NULL) {
1573 error_setg(errp, "Could not allocate L1 table");
1574 ret = -ENOMEM;
1575 goto fail;
1576 }
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +02001577 ret = bdrv_pread(bs->file, s->l1_table_offset, s->l1_table,
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02001578 s->l1_size * L1E_SIZE);
Jes Sorensen6d85a572010-12-17 16:02:40 +01001579 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001580 error_setg_errno(errp, -ret, "Could not read L1 table");
Stefan Weild191d122009-10-26 16:11:16 +01001581 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001582 }
Stefan Weild191d122009-10-26 16:11:16 +01001583 for(i = 0;i < s->l1_size; i++) {
Peter Maydell3b698f52018-10-09 18:24:59 +01001584 s->l1_table[i] = be64_to_cpu(s->l1_table[i]);
Stefan Weild191d122009-10-26 16:11:16 +01001585 }
bellard585f8582006-08-05 21:14:20 +00001586 }
Kevin Wolf29c1a732011-01-10 17:17:28 +01001587
Kevin Wolf94edf3f2015-04-16 11:44:26 +02001588 /* Parse driver-specific options */
1589 ret = qcow2_update_options(bs, options, flags, errp);
Kevin Wolf90efa0e2015-04-16 11:36:10 +02001590 if (ret < 0) {
1591 goto fail;
1592 }
1593
Anthony Liguori06d92602011-11-14 15:09:46 -06001594 s->flags = flags;
ths3b46e622007-09-17 08:09:54 +00001595
Jes Sorensen6d85a572010-12-17 16:02:40 +01001596 ret = qcow2_refcount_init(bs);
1597 if (ret != 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001598 error_setg_errno(errp, -ret, "Could not initialize refcount handling");
bellard585f8582006-08-05 21:14:20 +00001599 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001600 }
bellard585f8582006-08-05 21:14:20 +00001601
Blue Swirl72cf2d42009-09-12 07:36:22 +00001602 QLIST_INIT(&s->cluster_allocs);
Kevin Wolf0b919fa2013-06-19 13:44:20 +02001603 QTAILQ_INIT(&s->discards);
Kevin Wolff2149782009-08-31 16:48:49 +02001604
aliguori9b80ddf2009-03-28 17:55:06 +00001605 /* read qcow2 extensions */
Max Reitz3ef6c402013-09-05 09:40:43 +02001606 if (qcow2_read_extensions(bs, header.header_length, ext_end, NULL,
Markus Armbrusteraf175e82020-07-07 18:06:03 +02001607 flags, &update_header, errp)) {
Jes Sorensen6d85a572010-12-17 16:02:40 +01001608 ret = -EINVAL;
aliguori9b80ddf2009-03-28 17:55:06 +00001609 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001610 }
aliguori9b80ddf2009-03-28 17:55:06 +00001611
Kevin Wolf0e8c08b2019-01-29 17:13:57 +01001612 /* Open external data file */
Max Reitz8b1869d2020-05-13 13:05:35 +02001613 s->data_file = bdrv_open_child(NULL, options, "data-file", bs,
1614 &child_of_bds, BDRV_CHILD_DATA,
1615 true, &local_err);
Kevin Wolf0e8c08b2019-01-29 17:13:57 +01001616 if (local_err) {
1617 error_propagate(errp, local_err);
1618 ret = -EINVAL;
1619 goto fail;
1620 }
1621
1622 if (s->incompatible_features & QCOW2_INCOMPAT_DATA_FILE) {
Kevin Wolf9b890bd2019-01-15 19:02:40 +01001623 if (!s->data_file && s->image_data_file) {
1624 s->data_file = bdrv_open_child(s->image_data_file, options,
Max Reitz8b1869d2020-05-13 13:05:35 +02001625 "data-file", bs, &child_of_bds,
1626 BDRV_CHILD_DATA, false, errp);
Kevin Wolf9b890bd2019-01-15 19:02:40 +01001627 if (!s->data_file) {
1628 ret = -EINVAL;
1629 goto fail;
1630 }
1631 }
Kevin Wolf0e8c08b2019-01-29 17:13:57 +01001632 if (!s->data_file) {
1633 error_setg(errp, "'data-file' is required for this image");
1634 ret = -EINVAL;
1635 goto fail;
1636 }
Max Reitz8b1869d2020-05-13 13:05:35 +02001637
1638 /* No data here */
1639 bs->file->role &= ~BDRV_CHILD_DATA;
1640
1641 /* Must succeed because we have given up permissions if anything */
1642 bdrv_child_refresh_perms(bs, bs->file, &error_abort);
Kevin Wolf0e8c08b2019-01-29 17:13:57 +01001643 } else {
1644 if (s->data_file) {
1645 error_setg(errp, "'data-file' can only be set for images with an "
1646 "external data file");
1647 ret = -EINVAL;
1648 goto fail;
Kevin Wolf6c3944d2019-02-22 14:29:38 +01001649 }
1650
1651 s->data_file = bs->file;
1652
1653 if (data_file_is_raw(bs)) {
1654 error_setg(errp, "data-file-raw requires a data file");
1655 ret = -EINVAL;
1656 goto fail;
Kevin Wolf0e8c08b2019-01-29 17:13:57 +01001657 }
1658 }
Kevin Wolf93c24932019-01-14 16:48:25 +01001659
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01001660 /* qcow2_read_extension may have set up the crypto context
1661 * if the crypt method needs a header region, some methods
1662 * don't need header extensions, so must check here
1663 */
1664 if (s->crypt_method_header && !s->crypto) {
1665 if (s->crypt_method_header == QCOW_CRYPT_AES) {
1666 unsigned int cflags = 0;
1667 if (flags & BDRV_O_NO_IO) {
1668 cflags |= QCRYPTO_BLOCK_OPEN_NO_IO;
1669 }
Daniel P. Berrange1cd9a782017-06-23 17:24:17 +01001670 s->crypto = qcrypto_block_open(s->crypto_opts, "encrypt.",
Vladimir Sementsov-Ogievskiy8ac0f152019-05-06 17:27:41 +03001671 NULL, NULL, cflags,
1672 QCOW2_MAX_THREADS, errp);
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01001673 if (!s->crypto) {
1674 ret = -EINVAL;
1675 goto fail;
1676 }
1677 } else if (!(flags & BDRV_O_NO_IO)) {
1678 error_setg(errp, "Missing CRYPTO header for crypt method %d",
1679 s->crypt_method_header);
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01001680 ret = -EINVAL;
1681 goto fail;
1682 }
1683 }
1684
bellard585f8582006-08-05 21:14:20 +00001685 /* read the backing file name */
1686 if (header.backing_file_offset != 0) {
1687 len = header.backing_file_size;
Jeff Cody9a29e182015-01-22 08:03:30 -05001688 if (len > MIN(1023, s->cluster_size - header.backing_file_offset) ||
Jeff Codye729fa62015-01-27 08:33:55 -05001689 len >= sizeof(bs->backing_file)) {
Kevin Wolf6d33e8e2014-03-26 13:05:47 +01001690 error_setg(errp, "Backing file name too long");
1691 ret = -EINVAL;
1692 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001693 }
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +02001694 ret = bdrv_pread(bs->file, header.backing_file_offset,
Max Reitz998c2012019-02-01 20:29:08 +01001695 bs->auto_backing_file, len);
Jes Sorensen6d85a572010-12-17 16:02:40 +01001696 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001697 error_setg_errno(errp, -ret, "Could not read backing file name");
bellard585f8582006-08-05 21:14:20 +00001698 goto fail;
Jes Sorensen6d85a572010-12-17 16:02:40 +01001699 }
Max Reitz998c2012019-02-01 20:29:08 +01001700 bs->auto_backing_file[len] = '\0';
1701 pstrcpy(bs->backing_file, sizeof(bs->backing_file),
1702 bs->auto_backing_file);
1703 s->image_backing_file = g_strdup(bs->auto_backing_file);
bellard585f8582006-08-05 21:14:20 +00001704 }
Kevin Wolf42deb292011-11-16 11:43:28 +01001705
Max Reitz8bc584f2019-10-11 17:28:06 +02001706 /*
1707 * Internal snapshots; skip reading them in check mode, because
1708 * we do not need them then, and we do not want to abort because
1709 * of a broken table.
1710 */
1711 if (!(flags & BDRV_O_CHECK)) {
1712 s->snapshots_offset = header.snapshots_offset;
1713 s->nb_snapshots = header.nb_snapshots;
Kevin Wolf11b128f2014-03-26 13:06:04 +01001714
Max Reitz8bc584f2019-10-11 17:28:06 +02001715 ret = qcow2_read_snapshots(bs, errp);
1716 if (ret < 0) {
1717 goto fail;
1718 }
Jes Sorensen6d85a572010-12-17 16:02:40 +01001719 }
bellard585f8582006-08-05 21:14:20 +00001720
Stefan Hajnocziaf7b7082012-06-14 11:42:23 +01001721 /* Clear unknown autoclear feature bits */
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +03001722 update_header |= s->autoclear_features & ~QCOW2_AUTOCLEAR_MASK;
Vladimir Sementsov-Ogievskiyd1258dd2017-06-28 15:05:11 +03001723 update_header =
1724 update_header && !bs->read_only && !(flags & BDRV_O_INACTIVE);
1725 if (update_header) {
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +03001726 s->autoclear_features &= QCOW2_AUTOCLEAR_MASK;
Vladimir Sementsov-Ogievskiyd1258dd2017-06-28 15:05:11 +03001727 }
1728
Vladimir Sementsov-Ogievskiy9c98f142018-10-29 16:23:17 -04001729 /* == Handle persistent dirty bitmaps ==
1730 *
1731 * We want load dirty bitmaps in three cases:
1732 *
1733 * 1. Normal open of the disk in active mode, not related to invalidation
1734 * after migration.
1735 *
1736 * 2. Invalidation of the target vm after pre-copy phase of migration, if
1737 * bitmaps are _not_ migrating through migration channel, i.e.
1738 * 'dirty-bitmaps' capability is disabled.
1739 *
1740 * 3. Invalidation of source vm after failed or canceled migration.
1741 * This is a very interesting case. There are two possible types of
1742 * bitmaps:
1743 *
1744 * A. Stored on inactivation and removed. They should be loaded from the
1745 * image.
1746 *
1747 * B. Not stored: not-persistent bitmaps and bitmaps, migrated through
1748 * the migration channel (with dirty-bitmaps capability).
1749 *
1750 * On the other hand, there are two possible sub-cases:
1751 *
1752 * 3.1 disk was changed by somebody else while were inactive. In this
1753 * case all in-RAM dirty bitmaps (both persistent and not) are
1754 * definitely invalid. And we don't have any method to determine
1755 * this.
1756 *
1757 * Simple and safe thing is to just drop all the bitmaps of type B on
1758 * inactivation. But in this case we lose bitmaps in valid 4.2 case.
1759 *
1760 * On the other hand, resuming source vm, if disk was already changed
1761 * is a bad thing anyway: not only bitmaps, the whole vm state is
1762 * out of sync with disk.
1763 *
1764 * This means, that user or management tool, who for some reason
1765 * decided to resume source vm, after disk was already changed by
1766 * target vm, should at least drop all dirty bitmaps by hand.
1767 *
1768 * So, we can ignore this case for now, but TODO: "generation"
1769 * extension for qcow2, to determine, that image was changed after
1770 * last inactivation. And if it is changed, we will drop (or at least
1771 * mark as 'invalid' all the bitmaps of type B, both persistent
1772 * and not).
1773 *
1774 * 3.2 disk was _not_ changed while were inactive. Bitmaps may be saved
1775 * to disk ('dirty-bitmaps' capability disabled), or not saved
1776 * ('dirty-bitmaps' capability enabled), but we don't need to care
1777 * of: let's load bitmaps as always: stored bitmaps will be loaded,
1778 * and not stored has flag IN_USE=1 in the image and will be skipped
1779 * on loading.
1780 *
1781 * One remaining possible case when we don't want load bitmaps:
1782 *
1783 * 4. Open disk in inactive mode in target vm (bitmaps are migrating or
1784 * will be loaded on invalidation, no needs try loading them before)
1785 */
1786
1787 if (!(bdrv_get_flags(bs) & BDRV_O_INACTIVE)) {
1788 /* It's case 1, 2 or 3.2. Or 3.1 which is BUG in management layer. */
1789 bool header_updated = qcow2_load_dirty_bitmaps(bs, &local_err);
Tuguoyi66be5c32019-12-18 11:53:35 +00001790 if (local_err != NULL) {
1791 error_propagate(errp, local_err);
1792 ret = -EINVAL;
1793 goto fail;
1794 }
Vladimir Sementsov-Ogievskiy9c98f142018-10-29 16:23:17 -04001795
1796 update_header = update_header && !header_updated;
Vladimir Sementsov-Ogievskiyd1258dd2017-06-28 15:05:11 +03001797 }
Vladimir Sementsov-Ogievskiyd1258dd2017-06-28 15:05:11 +03001798
1799 if (update_header) {
Stefan Hajnocziaf7b7082012-06-14 11:42:23 +01001800 ret = qcow2_update_header(bs);
1801 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02001802 error_setg_errno(errp, -ret, "Could not update qcow2 header");
Stefan Hajnocziaf7b7082012-06-14 11:42:23 +01001803 goto fail;
1804 }
1805 }
1806
Kevin Wolf3b650812019-11-22 16:57:48 +01001807 bs->supported_zero_flags = header.version >= 3 ?
1808 BDRV_REQ_MAY_UNMAP | BDRV_REQ_NO_FALLBACK : 0;
Kevin Wolff01643f2020-04-24 14:54:42 +02001809 bs->supported_truncate_flags = BDRV_REQ_ZERO_WRITE;
Kevin Wolf68d100e2011-06-30 17:42:09 +02001810
Stefan Hajnoczic61d0002012-07-27 09:05:19 +01001811 /* Repair image if dirty */
Kevin Wolf04c01a52016-01-13 15:56:06 +01001812 if (!(flags & (BDRV_O_CHECK | BDRV_O_INACTIVE)) && !bs->read_only &&
Stefan Hajnoczi058f8f12012-08-09 13:05:56 +01001813 (s->incompatible_features & QCOW2_INCOMPAT_DIRTY)) {
Stefan Hajnoczic61d0002012-07-27 09:05:19 +01001814 BdrvCheckResult result = {0};
1815
Paolo Bonzini2fd61632018-03-01 17:36:19 +01001816 ret = qcow2_co_check_locked(bs, &result,
1817 BDRV_FIX_ERRORS | BDRV_FIX_LEAKS);
Max Reitz791fff52017-11-10 21:31:07 +01001818 if (ret < 0 || result.check_errors) {
1819 if (ret >= 0) {
1820 ret = -EIO;
1821 }
Max Reitz3ef6c402013-09-05 09:40:43 +02001822 error_setg_errno(errp, -ret, "Could not repair dirty image");
Stefan Hajnoczic61d0002012-07-27 09:05:19 +01001823 goto fail;
1824 }
1825 }
1826
bellard585f8582006-08-05 21:14:20 +00001827#ifdef DEBUG_ALLOC
Philipp Hahn6cbc3032011-08-04 19:22:10 +02001828 {
1829 BdrvCheckResult result = {0};
Stefan Hajnoczib35278f2012-06-15 16:41:07 +01001830 qcow2_check_refcounts(bs, &result, 0);
Philipp Hahn6cbc3032011-08-04 19:22:10 +02001831 }
bellard585f8582006-08-05 21:14:20 +00001832#endif
Vladimir Sementsov-Ogievskiyceb029c2018-06-20 17:48:37 +03001833
Vladimir Sementsov-Ogievskiy6f13a312019-05-06 17:27:38 +03001834 qemu_co_queue_init(&s->thread_task_queue);
Vladimir Sementsov-Ogievskiyceb029c2018-06-20 17:48:37 +03001835
Jes Sorensen6d85a572010-12-17 16:02:40 +01001836 return ret;
bellard585f8582006-08-05 21:14:20 +00001837
1838 fail:
Kevin Wolf9b890bd2019-01-15 19:02:40 +01001839 g_free(s->image_data_file);
Kevin Wolf0e8c08b2019-01-29 17:13:57 +01001840 if (has_data_file(bs)) {
1841 bdrv_unref_child(bs, s->data_file);
Vladimir Sementsov-Ogievskiy808cf3c2020-03-16 09:06:31 +03001842 s->data_file = NULL;
Kevin Wolf0e8c08b2019-01-29 17:13:57 +01001843 }
Kevin Wolf6744cba2011-12-15 12:20:58 +01001844 g_free(s->unknown_header_fields);
Kevin Wolf75bab852012-02-02 14:52:08 +01001845 cleanup_unknown_header_ext(bs);
Kevin Wolfed6ccf02009-05-28 16:07:07 +02001846 qcow2_free_snapshots(bs);
1847 qcow2_refcount_close(bs);
Kevin Wolfde828152014-05-20 17:12:47 +02001848 qemu_vfree(s->l1_table);
Max Reitzcf939802013-08-30 14:34:26 +02001849 /* else pre-write overlap checks in cache_destroy may crash */
1850 s->l1_table = NULL;
Alberto Garcia279621c2015-08-04 15:14:40 +03001851 cache_clean_timer_del(bs);
Kevin Wolf29c1a732011-01-10 17:17:28 +01001852 if (s->l2_table_cache) {
Alberto Garciae64d4072018-02-05 16:33:08 +02001853 qcow2_cache_destroy(s->l2_table_cache);
Kevin Wolf29c1a732011-01-10 17:17:28 +01001854 }
Prasad Joshic5a33ee2014-03-28 23:08:58 +05301855 if (s->refcount_block_cache) {
Alberto Garciae64d4072018-02-05 16:33:08 +02001856 qcow2_cache_destroy(s->refcount_block_cache);
Prasad Joshic5a33ee2014-03-28 23:08:58 +05301857 }
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01001858 qcrypto_block_free(s->crypto);
1859 qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);
Jes Sorensen6d85a572010-12-17 16:02:40 +01001860 return ret;
bellard585f8582006-08-05 21:14:20 +00001861}
1862
/*
 * Parameter/result bundle handed to qcow2_open_entry() so the open
 * sequence can run inside a coroutine (see qcow2_open()).
 */
typedef struct QCow2OpenCo {
    BlockDriverState *bs;   /* image being opened */
    QDict *options;         /* driver-specific open options */
    int flags;              /* BDRV_O_* open flags */
    Error **errp;           /* destination for error details */
    int ret;                /* result; stays -EINPROGRESS until the coroutine finishes */
} QCow2OpenCo;
1870
1871static void coroutine_fn qcow2_open_entry(void *opaque)
1872{
1873 QCow2OpenCo *qoc = opaque;
1874 BDRVQcow2State *s = qoc->bs->opaque;
1875
1876 qemu_co_mutex_lock(&s->lock);
1877 qoc->ret = qcow2_do_open(qoc->bs, qoc->options, qoc->flags, qoc->errp);
1878 qemu_co_mutex_unlock(&s->lock);
1879}
1880
/*
 * Implementation of BlockDriver.bdrv_open for qcow2.
 *
 * Opens the protocol-level "file" child first, then runs the real open
 * sequence (qcow2_do_open) in coroutine context: either directly, when we
 * are already inside a coroutine, or by spawning one and polling until it
 * reports completion via qoc.ret.
 *
 * Returns 0 on success or a negative errno; on failure *errp is set.
 */
static int qcow2_open(BlockDriverState *bs, QDict *options, int flags,
                      Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    QCow2OpenCo qoc = {
        .bs = bs,
        .options = options,
        .flags = flags,
        .errp = errp,
        .ret = -EINPROGRESS   /* sentinel: coroutine still running */
    };

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
                               BDRV_CHILD_IMAGE, false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    /* Initialise locks */
    qemu_co_mutex_init(&s->lock);

    if (qemu_in_coroutine()) {
        /* From bdrv_co_create. */
        qcow2_open_entry(&qoc);
    } else {
        /* Spawning a coroutine is only valid from the main context here */
        assert(qemu_get_current_aio_context() == qemu_get_aio_context());
        qemu_coroutine_enter(qemu_coroutine_create(qcow2_open_entry, &qoc));
        /* Drive the event loop until qcow2_open_entry() has finished */
        BDRV_POLL_WHILE(bs, qoc.ret == -EINPROGRESS);
    }
    return qoc.ret;
}
1912
Kevin Wolf3baca892014-07-16 17:48:16 +02001913static void qcow2_refresh_limits(BlockDriverState *bs, Error **errp)
Kevin Wolfd34682c2013-12-11 19:26:16 +01001914{
Kevin Wolfff991292015-09-07 17:12:56 +02001915 BDRVQcow2State *s = bs->opaque;
Kevin Wolfd34682c2013-12-11 19:26:16 +01001916
Eric Blakea84178c2016-06-23 16:37:15 -06001917 if (bs->encrypted) {
1918 /* Encryption works on a sector granularity */
Alberto Garcia6f8f0152018-10-11 13:58:02 +03001919 bs->bl.request_alignment = qcrypto_block_get_sector_size(s->crypto);
Eric Blakea84178c2016-06-23 16:37:15 -06001920 }
Alberto Garciaa6841a22020-07-10 18:13:10 +02001921 bs->bl.pwrite_zeroes_alignment = s->subcluster_size;
Eric Blakeecdbead2016-11-17 14:13:55 -06001922 bs->bl.pdiscard_alignment = s->cluster_size;
Kevin Wolfd34682c2013-12-11 19:26:16 +01001923}
1924
/*
 * Implementation of BlockDriver.bdrv_reopen_prepare.
 *
 * Validates the new options into a freshly allocated Qcow2ReopenState
 * (stored in state->opaque for the later commit/abort callback) and, when
 * reopening read-only, flushes all pending metadata and marks the image
 * clean so the read-only image is consistent on disk.
 *
 * Returns 0 on success or a negative errno; on failure the allocated
 * state is rolled back and freed here.
 */
static int qcow2_reopen_prepare(BDRVReopenState *state,
                                BlockReopenQueue *queue, Error **errp)
{
    Qcow2ReopenState *r;
    int ret;

    r = g_new0(Qcow2ReopenState, 1);
    state->opaque = r;

    ret = qcow2_update_options_prepare(state->bs, r, state->options,
                                       state->flags, errp);
    if (ret < 0) {
        goto fail;
    }

    /* We need to write out any unwritten data if we reopen read-only. */
    if ((state->flags & BDRV_O_RDWR) == 0) {
        /* Persistent dirty bitmaps must be stored before going read-only */
        ret = qcow2_reopen_bitmaps_ro(state->bs, errp);
        if (ret < 0) {
            goto fail;
        }

        /* Flush before clearing the dirty flag, so data reaches the disk
         * before the header claims the image is clean */
        ret = bdrv_flush(state->bs);
        if (ret < 0) {
            goto fail;
        }

        ret = qcow2_mark_clean(state->bs);
        if (ret < 0) {
            goto fail;
        }
    }

    return 0;

fail:
    qcow2_update_options_abort(state->bs, r);
    g_free(r);
    return ret;
}
1965
1966static void qcow2_reopen_commit(BDRVReopenState *state)
1967{
1968 qcow2_update_options_commit(state->bs, state->opaque);
Peter Krempa65eb7c82020-02-28 13:44:47 +01001969 g_free(state->opaque);
1970}
1971
1972static void qcow2_reopen_commit_post(BDRVReopenState *state)
1973{
Vladimir Sementsov-Ogievskiy4dd09f62019-09-27 15:23:55 +03001974 if (state->flags & BDRV_O_RDWR) {
1975 Error *local_err = NULL;
1976
1977 if (qcow2_reopen_bitmaps_rw(state->bs, &local_err) < 0) {
1978 /*
1979 * This is not fatal, bitmaps just left read-only, so all following
1980 * writes will fail. User can remove read-only bitmaps to unblock
1981 * writes or retry reopen.
1982 */
1983 error_reportf_err(local_err,
1984 "%s: Failed to make dirty bitmaps writable: ",
1985 bdrv_get_node_name(state->bs));
1986 }
1987 }
Kevin Wolf5b0959a2015-04-16 13:42:27 +02001988}
1989
1990static void qcow2_reopen_abort(BDRVReopenState *state)
1991{
1992 qcow2_update_options_abort(state->bs, state->opaque);
1993 g_free(state->opaque);
Jeff Cody21d82ac2012-09-20 15:13:28 -04001994}
1995
Kevin Wolf5365f442015-11-16 15:34:59 +01001996static void qcow2_join_options(QDict *options, QDict *old_options)
1997{
1998 bool has_new_overlap_template =
1999 qdict_haskey(options, QCOW2_OPT_OVERLAP) ||
2000 qdict_haskey(options, QCOW2_OPT_OVERLAP_TEMPLATE);
2001 bool has_new_total_cache_size =
2002 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE);
2003 bool has_all_cache_options;
2004
2005 /* New overlap template overrides all old overlap options */
2006 if (has_new_overlap_template) {
2007 qdict_del(old_options, QCOW2_OPT_OVERLAP);
2008 qdict_del(old_options, QCOW2_OPT_OVERLAP_TEMPLATE);
2009 qdict_del(old_options, QCOW2_OPT_OVERLAP_MAIN_HEADER);
2010 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L1);
2011 qdict_del(old_options, QCOW2_OPT_OVERLAP_ACTIVE_L2);
2012 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_TABLE);
2013 qdict_del(old_options, QCOW2_OPT_OVERLAP_REFCOUNT_BLOCK);
2014 qdict_del(old_options, QCOW2_OPT_OVERLAP_SNAPSHOT_TABLE);
2015 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L1);
2016 qdict_del(old_options, QCOW2_OPT_OVERLAP_INACTIVE_L2);
2017 }
2018
2019 /* New total cache size overrides all old options */
2020 if (qdict_haskey(options, QCOW2_OPT_CACHE_SIZE)) {
2021 qdict_del(old_options, QCOW2_OPT_L2_CACHE_SIZE);
2022 qdict_del(old_options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2023 }
2024
2025 qdict_join(options, old_options, false);
2026
2027 /*
2028 * If after merging all cache size options are set, an old total size is
2029 * overwritten. Do keep all options, however, if all three are new. The
2030 * resulting error message is what we want to happen.
2031 */
2032 has_all_cache_options =
2033 qdict_haskey(options, QCOW2_OPT_CACHE_SIZE) ||
2034 qdict_haskey(options, QCOW2_OPT_L2_CACHE_SIZE) ||
2035 qdict_haskey(options, QCOW2_OPT_REFCOUNT_CACHE_SIZE);
2036
2037 if (has_all_cache_options && !has_new_total_cache_size) {
2038 qdict_del(options, QCOW2_OPT_CACHE_SIZE);
2039 }
2040}
2041
/*
 * Implementation of BlockDriver.bdrv_co_block_status.
 *
 * Reports the allocation status of the region starting at guest @offset.
 * *pnum receives the number of contiguous bytes (at most INT_MAX) that
 * share this status; when the data has a valid host mapping, *map and
 * *file receive the mapped byte offset and the node it lives in.
 *
 * Returns a BDRV_BLOCK_* bitmask, or a negative errno on failure.
 */
static int coroutine_fn qcow2_co_block_status(BlockDriverState *bs,
                                              bool want_zero,
                                              int64_t offset, int64_t count,
                                              int64_t *pnum, int64_t *map,
                                              BlockDriverState **file)
{
    BDRVQcow2State *s = bs->opaque;
    uint64_t host_offset;
    unsigned int bytes;
    QCow2SubclusterType type;
    int ret, status = 0;

    qemu_co_mutex_lock(&s->lock);

    /* One-time lazy probe, cached in the image state */
    if (!s->metadata_preallocation_checked) {
        ret = qcow2_detect_metadata_preallocation(bs);
        s->metadata_preallocation = (ret == 1);
        s->metadata_preallocation_checked = true;
    }

    /* qcow2_get_host_offset() clamps bytes to the contiguous extent */
    bytes = MIN(INT_MAX, count);
    ret = qcow2_get_host_offset(bs, offset, &bytes, &host_offset, &type);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        return ret;
    }

    *pnum = bytes;

    /* A host mapping is only reported for allocated, unencrypted data */
    if ((type == QCOW2_SUBCLUSTER_NORMAL ||
         type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
         type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) && !s->crypto) {
        *map = host_offset;
        *file = s->data_file->bs;
        status |= BDRV_BLOCK_OFFSET_VALID;
    }
    /* Zero subclusters read as zeroes; everything else except the
     * unallocated types carries real data */
    if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
        type == QCOW2_SUBCLUSTER_ZERO_ALLOC) {
        status |= BDRV_BLOCK_ZERO;
    } else if (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
               type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC) {
        status |= BDRV_BLOCK_DATA;
    }
    /* With preallocated metadata, ask callers to also look at the mapped
     * file for a more precise zero/data answer */
    if (s->metadata_preallocation && (status & BDRV_BLOCK_DATA) &&
        (status & BDRV_BLOCK_OFFSET_VALID))
    {
        status |= BDRV_BLOCK_RECURSE;
    }
    return status;
}
2092
Fam Zhengfd9fcd32018-06-01 17:26:42 +08002093static coroutine_fn int qcow2_handle_l2meta(BlockDriverState *bs,
2094 QCowL2Meta **pl2meta,
2095 bool link_l2)
2096{
2097 int ret = 0;
2098 QCowL2Meta *l2meta = *pl2meta;
2099
2100 while (l2meta != NULL) {
2101 QCowL2Meta *next;
2102
Fam Zheng354d9302018-06-27 11:57:51 +08002103 if (link_l2) {
Fam Zhengfd9fcd32018-06-01 17:26:42 +08002104 ret = qcow2_alloc_cluster_link_l2(bs, l2meta);
2105 if (ret) {
2106 goto out;
2107 }
Kevin Wolf8b24cd12018-06-28 17:05:45 +02002108 } else {
2109 qcow2_alloc_cluster_abort(bs, l2meta);
Fam Zhengfd9fcd32018-06-01 17:26:42 +08002110 }
2111
2112 /* Take the request off the list of running requests */
Alberto Garciaf7bd5bb2020-09-03 18:37:48 +02002113 QLIST_REMOVE(l2meta, next_in_flight);
Fam Zhengfd9fcd32018-06-01 17:26:42 +08002114
2115 qemu_co_queue_restart_all(&l2meta->dependent_requests);
2116
2117 next = l2meta->next;
2118 g_free(l2meta);
2119 l2meta = next;
2120 }
2121out:
2122 *pl2meta = l2meta;
2123 return ret;
2124}
2125
Vladimir Sementsov-Ogievskiy88f468e2019-09-16 20:53:22 +03002126static coroutine_fn int
2127qcow2_co_preadv_encrypted(BlockDriverState *bs,
Alberto Garcia9c4269d2020-07-10 18:12:43 +02002128 uint64_t host_offset,
Vladimir Sementsov-Ogievskiy88f468e2019-09-16 20:53:22 +03002129 uint64_t offset,
2130 uint64_t bytes,
2131 QEMUIOVector *qiov,
2132 uint64_t qiov_offset)
2133{
2134 int ret;
2135 BDRVQcow2State *s = bs->opaque;
2136 uint8_t *buf;
2137
2138 assert(bs->encrypted && s->crypto);
2139 assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
2140
2141 /*
2142 * For encrypted images, read everything into a temporary
2143 * contiguous buffer on which the AES functions can work.
2144 * Also, decryption in a separate buffer is better as it
2145 * prevents the guest from learning information about the
2146 * encrypted nature of the virtual disk.
2147 */
2148
2149 buf = qemu_try_blockalign(s->data_file->bs, bytes);
2150 if (buf == NULL) {
2151 return -ENOMEM;
2152 }
2153
2154 BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
Alberto Garcia9c4269d2020-07-10 18:12:43 +02002155 ret = bdrv_co_pread(s->data_file, host_offset, bytes, buf, 0);
Vladimir Sementsov-Ogievskiy88f468e2019-09-16 20:53:22 +03002156 if (ret < 0) {
2157 goto fail;
2158 }
2159
Alberto Garcia9c4269d2020-07-10 18:12:43 +02002160 if (qcow2_co_decrypt(bs, host_offset, offset, buf, bytes) < 0)
Vladimir Sementsov-Ogievskiy88f468e2019-09-16 20:53:22 +03002161 {
2162 ret = -EIO;
2163 goto fail;
2164 }
2165 qemu_iovec_from_buf(qiov, qiov_offset, buf, bytes);
2166
2167fail:
2168 qemu_vfree(buf);
2169
2170 return ret;
2171}
2172
/*
 * One qcow2 read or write chunk, executed either synchronously or handed
 * to an AioTaskPool for parallel processing (see qcow2_add_task()).
 */
typedef struct Qcow2AioTask {
    AioTask task; /* embedded base task; recovered via container_of() */

    BlockDriverState *bs;
    QCow2SubclusterType subcluster_type; /* only for read */
    uint64_t host_offset; /* or full descriptor in compressed clusters */
    uint64_t offset;      /* guest offset of the chunk */
    uint64_t bytes;
    QEMUIOVector *qiov;
    uint64_t qiov_offset;
    QCowL2Meta *l2meta; /* only for write */
} Qcow2AioTask;
2185
2186static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task);
2187static coroutine_fn int qcow2_add_task(BlockDriverState *bs,
2188 AioTaskPool *pool,
2189 AioTaskFunc func,
Alberto Garcia10dabdc52020-07-10 18:13:00 +02002190 QCow2SubclusterType subcluster_type,
Alberto Garcia9c4269d2020-07-10 18:12:43 +02002191 uint64_t host_offset,
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002192 uint64_t offset,
2193 uint64_t bytes,
2194 QEMUIOVector *qiov,
2195 size_t qiov_offset,
2196 QCowL2Meta *l2meta)
2197{
2198 Qcow2AioTask local_task;
2199 Qcow2AioTask *task = pool ? g_new(Qcow2AioTask, 1) : &local_task;
2200
2201 *task = (Qcow2AioTask) {
2202 .task.func = func,
2203 .bs = bs,
Alberto Garcia10dabdc52020-07-10 18:13:00 +02002204 .subcluster_type = subcluster_type,
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002205 .qiov = qiov,
Alberto Garcia9c4269d2020-07-10 18:12:43 +02002206 .host_offset = host_offset,
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002207 .offset = offset,
2208 .bytes = bytes,
2209 .qiov_offset = qiov_offset,
2210 .l2meta = l2meta,
2211 };
2212
2213 trace_qcow2_add_task(qemu_coroutine_self(), bs, pool,
2214 func == qcow2_co_preadv_task_entry ? "read" : "write",
Alberto Garcia10dabdc52020-07-10 18:13:00 +02002215 subcluster_type, host_offset, offset, bytes,
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002216 qiov, qiov_offset);
2217
2218 if (!pool) {
2219 return func(&task->task);
2220 }
2221
2222 aio_task_pool_start_task(pool, &task->task);
2223
2224 return 0;
2225}
2226
/*
 * Read one contiguous chunk whose mapping (@subc_type / @host_offset) was
 * already resolved by qcow2_get_host_offset().  Called without s->lock.
 * Returns 0 on success, negative errno on failure.
 */
static coroutine_fn int qcow2_co_preadv_task(BlockDriverState *bs,
                                             QCow2SubclusterType subc_type,
                                             uint64_t host_offset,
                                             uint64_t offset, uint64_t bytes,
                                             QEMUIOVector *qiov,
                                             size_t qiov_offset)
{
    BDRVQcow2State *s = bs->opaque;

    switch (subc_type) {
    case QCOW2_SUBCLUSTER_ZERO_PLAIN:
    case QCOW2_SUBCLUSTER_ZERO_ALLOC:
        /* Both zero types are handled in qcow2_co_preadv_part */
        g_assert_not_reached();

    case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
    case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
        assert(bs->backing); /* otherwise handled in qcow2_co_preadv_part */

        /* Unallocated in this image: read through from the backing file */
        BLKDBG_EVENT(bs->file, BLKDBG_READ_BACKING_AIO);
        return bdrv_co_preadv_part(bs->backing, offset, bytes,
                                   qiov, qiov_offset, 0);

    case QCOW2_SUBCLUSTER_COMPRESSED:
        /* host_offset carries the full compressed cluster descriptor here */
        return qcow2_co_preadv_compressed(bs, host_offset,
                                          offset, bytes, qiov, qiov_offset);

    case QCOW2_SUBCLUSTER_NORMAL:
        if (bs->encrypted) {
            /* Goes through a bounce buffer for decryption */
            return qcow2_co_preadv_encrypted(bs, host_offset,
                                             offset, bytes, qiov, qiov_offset);
        }

        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        return bdrv_co_preadv_part(s->data_file, host_offset,
                                   bytes, qiov, qiov_offset, 0);

    default:
        g_assert_not_reached();
    }

    g_assert_not_reached();
}
2270
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002271static coroutine_fn int qcow2_co_preadv_task_entry(AioTask *task)
2272{
2273 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2274
2275 assert(!t->l2meta);
2276
Alberto Garcia10dabdc52020-07-10 18:13:00 +02002277 return qcow2_co_preadv_task(t->bs, t->subcluster_type,
2278 t->host_offset, t->offset, t->bytes,
2279 t->qiov, t->qiov_offset);
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002280}
2281
/*
 * Top-level qcow2 read: split [offset, offset + bytes) into chunks with a
 * uniform mapping, zero-fill the trivial ones inline and dispatch the rest
 * to qcow2_co_preadv_task(), possibly in parallel through an AioTaskPool.
 * Returns 0 on success, negative errno on failure.
 */
static coroutine_fn int qcow2_co_preadv_part(BlockDriverState *bs,
                                             uint64_t offset, uint64_t bytes,
                                             QEMUIOVector *qiov,
                                             size_t qiov_offset, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    int ret = 0;
    unsigned int cur_bytes; /* number of bytes in current iteration */
    uint64_t host_offset = 0;
    QCow2SubclusterType type;
    AioTaskPool *aio = NULL;

    /* Stop early if a previously submitted parallel task failed */
    while (bytes != 0 && aio_task_pool_status(aio) == 0) {
        /* prepare next request */
        cur_bytes = MIN(bytes, INT_MAX);
        if (s->crypto) {
            cur_bytes = MIN(cur_bytes,
                            QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
        }

        /* The mapping lookup needs the metadata lock; the I/O does not */
        qemu_co_mutex_lock(&s->lock);
        ret = qcow2_get_host_offset(bs, offset, &cur_bytes,
                                    &host_offset, &type);
        qemu_co_mutex_unlock(&s->lock);
        if (ret < 0) {
            goto out;
        }

        if (type == QCOW2_SUBCLUSTER_ZERO_PLAIN ||
            type == QCOW2_SUBCLUSTER_ZERO_ALLOC ||
            (type == QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN && !bs->backing) ||
            (type == QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC && !bs->backing))
        {
            /* Reads as zeroes; no I/O needed */
            qemu_iovec_memset(qiov, qiov_offset, 0, cur_bytes);
        } else {
            /*
             * Only spin up the task pool once we know the request spans
             * more than one chunk; a single chunk runs synchronously.
             */
            if (!aio && cur_bytes != bytes) {
                aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
            }
            ret = qcow2_add_task(bs, aio, qcow2_co_preadv_task_entry, type,
                                 host_offset, offset, cur_bytes,
                                 qiov, qiov_offset, NULL);
            if (ret < 0) {
                goto out;
            }
        }

        bytes -= cur_bytes;
        offset += cur_bytes;
        qiov_offset += cur_bytes;
    }

out:
    if (aio) {
        /* Wait for in-flight tasks; report their error if we had none */
        aio_task_pool_wait_all(aio);
        if (ret == 0) {
            ret = aio_task_pool_status(aio);
        }
        g_free(aio);
    }

    return ret;
}
2344
Alberto Garciaee22a9d2017-06-19 16:40:08 +03002345/* Check if it's possible to merge a write request with the writing of
2346 * the data from the COW regions */
2347static bool merge_cow(uint64_t offset, unsigned bytes,
Vladimir Sementsov-Ogievskiy53962342019-06-04 19:15:14 +03002348 QEMUIOVector *qiov, size_t qiov_offset,
2349 QCowL2Meta *l2meta)
Alberto Garciaee22a9d2017-06-19 16:40:08 +03002350{
2351 QCowL2Meta *m;
2352
2353 for (m = l2meta; m != NULL; m = m->next) {
2354 /* If both COW regions are empty then there's nothing to merge */
2355 if (m->cow_start.nb_bytes == 0 && m->cow_end.nb_bytes == 0) {
2356 continue;
2357 }
2358
Anton Nefedovc8bb23c2019-05-16 17:27:49 +03002359 /* If COW regions are handled already, skip this too */
2360 if (m->skip_cow) {
2361 continue;
2362 }
2363
Alberto Garciaee22a9d2017-06-19 16:40:08 +03002364 /* The data (middle) region must be immediately after the
2365 * start region */
2366 if (l2meta_cow_start(m) + m->cow_start.nb_bytes != offset) {
2367 continue;
2368 }
2369
2370 /* The end region must be immediately after the data (middle)
2371 * region */
2372 if (m->offset + m->cow_end.offset != offset + bytes) {
2373 continue;
2374 }
2375
2376 /* Make sure that adding both COW regions to the QEMUIOVector
2377 * does not exceed IOV_MAX */
Vladimir Sementsov-Ogievskiy53962342019-06-04 19:15:14 +03002378 if (qemu_iovec_subvec_niov(qiov, qiov_offset, bytes) > IOV_MAX - 2) {
Alberto Garciaee22a9d2017-06-19 16:40:08 +03002379 continue;
2380 }
2381
Vladimir Sementsov-Ogievskiy53962342019-06-04 19:15:14 +03002382 m->data_qiov = qiov;
2383 m->data_qiov_offset = qiov_offset;
Alberto Garciaee22a9d2017-06-19 16:40:08 +03002384 return true;
2385 }
2386
2387 return false;
2388}
2389
Anton Nefedovc8bb23c2019-05-16 17:27:49 +03002390static bool is_unallocated(BlockDriverState *bs, int64_t offset, int64_t bytes)
2391{
2392 int64_t nr;
2393 return !bytes ||
Andrey Shinkevich170d3bd2019-05-29 20:56:14 +03002394 (!bdrv_is_allocated_above(bs, NULL, false, offset, bytes, &nr) &&
2395 nr == bytes);
Anton Nefedovc8bb23c2019-05-16 17:27:49 +03002396}
2397
2398static bool is_zero_cow(BlockDriverState *bs, QCowL2Meta *m)
2399{
2400 /*
2401 * This check is designed for optimization shortcut so it must be
2402 * efficient.
2403 * Instead of is_zero(), use is_unallocated() as it is faster (but not
2404 * as accurate and can result in false negatives).
2405 */
2406 return is_unallocated(bs, m->offset + m->cow_start.offset,
2407 m->cow_start.nb_bytes) &&
2408 is_unallocated(bs, m->offset + m->cow_end.offset,
2409 m->cow_end.nb_bytes);
2410}
2411
/*
 * Try to avoid COW work: for each allocation in the @l2meta chain whose
 * COW regions are known to read as zeroes, zero the whole affected host
 * range with an efficient write-zeroes and set m->skip_cow so the later
 * COW step is skipped.  Returns 0 on success (including "did nothing"),
 * negative errno on a hard error.
 */
static int handle_alloc_space(BlockDriverState *bs, QCowL2Meta *l2meta)
{
    BDRVQcow2State *s = bs->opaque;
    QCowL2Meta *m;

    /* Only worthwhile when the data file can zero without falling back
     * to writing explicit zero buffers */
    if (!(s->data_file->bs->supported_zero_flags & BDRV_REQ_NO_FALLBACK)) {
        return 0;
    }

    /* NOTE: skipped for encrypted images — host-side zeroes would not
     * read back as guest-visible zeroes through the crypto layer */
    if (bs->encrypted) {
        return 0;
    }

    for (m = l2meta; m != NULL; m = m->next) {
        int ret;
        /* Host range spanning from the start of cow_start to the end of
         * cow_end (the guest data in between is rewritten anyway) */
        uint64_t start_offset = m->alloc_offset + m->cow_start.offset;
        unsigned nb_bytes = m->cow_end.offset + m->cow_end.nb_bytes -
            m->cow_start.offset;

        if (!m->cow_start.nb_bytes && !m->cow_end.nb_bytes) {
            continue;
        }

        if (!is_zero_cow(bs, m)) {
            continue;
        }

        /*
         * instead of writing zero COW buffers,
         * efficiently zero out the whole clusters
         */

        ret = qcow2_pre_write_overlap_check(bs, 0, start_offset, nb_bytes,
                                            true);
        if (ret < 0) {
            return ret;
        }

        BLKDBG_EVENT(bs->file, BLKDBG_CLUSTER_ALLOC_SPACE);
        ret = bdrv_co_pwrite_zeroes(s->data_file, start_offset, nb_bytes,
                                    BDRV_REQ_NO_FALLBACK);
        if (ret < 0) {
            if (ret != -ENOTSUP && ret != -EAGAIN) {
                return ret;
            }
            /* Zeroing not possible right now: fall back to regular COW
             * for this allocation only */
            continue;
        }

        trace_qcow2_skip_cow(qemu_coroutine_self(), m->offset, m->nb_clusters);
        m->skip_cow = true;
    }
    return 0;
}
2465
/*
 * qcow2_co_pwritev_task
 * Called with s->lock unlocked
 * l2meta - if not NULL, qcow2_co_pwritev_task() will consume it. Caller must
 * not use it somehow after qcow2_co_pwritev_task() call
 *
 * Writes one chunk of guest data at the already-allocated @host_offset,
 * encrypting it first if necessary, then updates the L2 metadata for the
 * allocations described by @l2meta.  Returns 0 on success, negative errno
 * on failure.
 */
static coroutine_fn int qcow2_co_pwritev_task(BlockDriverState *bs,
                                              uint64_t host_offset,
                                              uint64_t offset, uint64_t bytes,
                                              QEMUIOVector *qiov,
                                              uint64_t qiov_offset,
                                              QCowL2Meta *l2meta)
{
    int ret;
    BDRVQcow2State *s = bs->opaque;
    void *crypt_buf = NULL;
    QEMUIOVector encrypted_qiov;

    if (bs->encrypted) {
        /* Encrypt into a bounce buffer and substitute it for the guest
         * qiov for the rest of this function */
        assert(s->crypto);
        assert(bytes <= QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size);
        crypt_buf = qemu_try_blockalign(bs->file->bs, bytes);
        if (crypt_buf == NULL) {
            ret = -ENOMEM;
            goto out_unlocked;
        }
        qemu_iovec_to_buf(qiov, qiov_offset, crypt_buf, bytes);

        if (qcow2_co_encrypt(bs, host_offset, offset, crypt_buf, bytes) < 0) {
            ret = -EIO;
            goto out_unlocked;
        }

        qemu_iovec_init_buf(&encrypted_qiov, crypt_buf, bytes);
        qiov = &encrypted_qiov;
        qiov_offset = 0;
    }

    /* Try to efficiently initialize the physical space with zeroes */
    ret = handle_alloc_space(bs, l2meta);
    if (ret < 0) {
        goto out_unlocked;
    }

    /*
     * If we need to do COW, check if it's possible to merge the
     * writing of the guest data together with that of the COW regions.
     * If it's not possible (or not necessary) then write the
     * guest data now.
     */
    if (!merge_cow(offset, bytes, qiov, qiov_offset, l2meta)) {
        BLKDBG_EVENT(bs->file, BLKDBG_WRITE_AIO);
        trace_qcow2_writev_data(qemu_coroutine_self(), host_offset);
        ret = bdrv_co_pwritev_part(s->data_file, host_offset,
                                   bytes, qiov, qiov_offset, 0);
        if (ret < 0) {
            goto out_unlocked;
        }
    }

    /* Updating the L2 metadata requires s->lock */
    qemu_co_mutex_lock(&s->lock);

    /* Success path: link the new clusters into the L2 table */
    ret = qcow2_handle_l2meta(bs, &l2meta, true);
    goto out_locked;

out_unlocked:
    qemu_co_mutex_lock(&s->lock);

out_locked:
    /* On the error path this aborts (rather than links) the allocation;
     * on the success path l2meta is already NULL and this is a no-op */
    qcow2_handle_l2meta(bs, &l2meta, false);
    qemu_co_mutex_unlock(&s->lock);

    qemu_vfree(crypt_buf);

    return ret;
}
2542
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002543static coroutine_fn int qcow2_co_pwritev_task_entry(AioTask *task)
2544{
2545 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
2546
Alberto Garcia10dabdc52020-07-10 18:13:00 +02002547 assert(!t->subcluster_type);
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002548
Alberto Garcia9c4269d2020-07-10 18:12:43 +02002549 return qcow2_co_pwritev_task(t->bs, t->host_offset,
Vladimir Sementsov-Ogievskiyd710cf52019-09-16 20:53:24 +03002550 t->offset, t->bytes, t->qiov, t->qiov_offset,
2551 t->l2meta);
2552}
2553
/*
 * Top-level qcow2 write: split [offset, offset + bytes) into chunks,
 * allocate host clusters for each chunk under s->lock, and dispatch the
 * data writes to qcow2_co_pwritev_task(), possibly in parallel through
 * an AioTaskPool.  Returns 0 on success, negative errno on failure.
 */
static coroutine_fn int qcow2_co_pwritev_part(
        BlockDriverState *bs, uint64_t offset, uint64_t bytes,
        QEMUIOVector *qiov, size_t qiov_offset, int flags)
{
    BDRVQcow2State *s = bs->opaque;
    int offset_in_cluster;
    int ret;
    unsigned int cur_bytes; /* number of bytes in current iteration */
    uint64_t host_offset;
    QCowL2Meta *l2meta = NULL;
    AioTaskPool *aio = NULL;

    trace_qcow2_writev_start_req(qemu_coroutine_self(), offset, bytes);

    /* Stop early if a previously submitted parallel task failed */
    while (bytes != 0 && aio_task_pool_status(aio) == 0) {

        l2meta = NULL;

        trace_qcow2_writev_start_part(qemu_coroutine_self());
        offset_in_cluster = offset_into_cluster(s, offset);
        cur_bytes = MIN(bytes, INT_MAX);
        if (bs->encrypted) {
            cur_bytes = MIN(cur_bytes,
                            QCOW_MAX_CRYPT_CLUSTERS * s->cluster_size
                            - offset_in_cluster);
        }

        /* Cluster allocation and overlap checks need the metadata lock */
        qemu_co_mutex_lock(&s->lock);

        ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
                                      &host_offset, &l2meta);
        if (ret < 0) {
            goto out_locked;
        }

        ret = qcow2_pre_write_overlap_check(bs, 0, host_offset,
                                            cur_bytes, true);
        if (ret < 0) {
            goto out_locked;
        }

        qemu_co_mutex_unlock(&s->lock);

        /* Only spin up the task pool once we know the request spans more
         * than one chunk; a single chunk runs synchronously */
        if (!aio && cur_bytes != bytes) {
            aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
        }
        ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_task_entry, 0,
                             host_offset, offset,
                             cur_bytes, qiov, qiov_offset, l2meta);
        l2meta = NULL; /* l2meta is consumed by qcow2_co_pwritev_task() */
        if (ret < 0) {
            goto fail_nometa;
        }

        bytes -= cur_bytes;
        offset += cur_bytes;
        qiov_offset += cur_bytes;
        trace_qcow2_writev_done_part(qemu_coroutine_self(), cur_bytes);
    }
    ret = 0;

    qemu_co_mutex_lock(&s->lock);

out_locked:
    /* Abort any allocation that was never handed off to a task */
    qcow2_handle_l2meta(bs, &l2meta, false);

    qemu_co_mutex_unlock(&s->lock);

fail_nometa:
    if (aio) {
        /* Wait for in-flight tasks; report their error if we had none */
        aio_task_pool_wait_all(aio);
        if (ret == 0) {
            ret = aio_task_pool_status(aio);
        }
        g_free(aio);
    }

    trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);

    return ret;
}
2635
Kevin Wolfec6d8912015-12-22 16:04:57 +01002636static int qcow2_inactivate(BlockDriverState *bs)
2637{
2638 BDRVQcow2State *s = bs->opaque;
2639 int ret, result = 0;
Vladimir Sementsov-Ogievskiy5f728262017-06-28 15:05:19 +03002640 Error *local_err = NULL;
Kevin Wolfec6d8912015-12-22 16:04:57 +01002641
Vladimir Sementsov-Ogievskiy644ddbb2019-09-27 15:23:52 +03002642 qcow2_store_persistent_dirty_bitmaps(bs, true, &local_err);
Pavel Butsykin83a8c772017-09-04 13:18:00 +03002643 if (local_err != NULL) {
2644 result = -EINVAL;
Vladimir Sementsov-Ogievskiy132adb62018-10-29 16:23:15 -04002645 error_reportf_err(local_err, "Lost persistent bitmaps during "
2646 "inactivation of node '%s': ",
2647 bdrv_get_device_or_node_name(bs));
Pavel Butsykin83a8c772017-09-04 13:18:00 +03002648 }
2649
Kevin Wolfec6d8912015-12-22 16:04:57 +01002650 ret = qcow2_cache_flush(bs, s->l2_table_cache);
2651 if (ret) {
2652 result = ret;
2653 error_report("Failed to flush the L2 table cache: %s",
2654 strerror(-ret));
2655 }
2656
2657 ret = qcow2_cache_flush(bs, s->refcount_block_cache);
2658 if (ret) {
2659 result = ret;
2660 error_report("Failed to flush the refcount block cache: %s",
2661 strerror(-ret));
2662 }
2663
2664 if (result == 0) {
2665 qcow2_mark_clean(bs);
2666 }
2667
2668 return result;
2669}
2670
/*
 * Tear down all qcow2 driver state for @bs.  If the node is still active,
 * metadata is flushed and the image is marked clean first.
 */
static void qcow2_close(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    qemu_vfree(s->l1_table);
    /* else pre-write overlap checks in cache_destroy may crash */
    s->l1_table = NULL;

    /* Flush caches/bitmaps and mark clean unless already inactivated */
    if (!(s->flags & BDRV_O_INACTIVE)) {
        qcow2_inactivate(bs);
    }

    cache_clean_timer_del(bs);
    qcow2_cache_destroy(s->l2_table_cache);
    qcow2_cache_destroy(s->refcount_block_cache);

    qcrypto_block_free(s->crypto);
    s->crypto = NULL;
    qapi_free_QCryptoBlockOpenOptions(s->crypto_opts);

    g_free(s->unknown_header_fields);
    cleanup_unknown_header_ext(bs);

    g_free(s->image_data_file);
    g_free(s->image_backing_file);
    g_free(s->image_backing_format);

    /* Drop the external data file child, if any */
    if (has_data_file(bs)) {
        bdrv_unref_child(bs, s->data_file);
        s->data_file = NULL;
    }

    qcow2_refcount_close(bs);
    qcow2_free_snapshots(bs);
}
2705
/*
 * Re-read all image metadata by closing and reopening the qcow2 layer
 * in place (e.g. after migration).  The crypto state is preserved across
 * the cycle.  On failure, bs->drv is cleared and an error is set.
 */
static void coroutine_fn qcow2_co_invalidate_cache(BlockDriverState *bs,
                                                   Error **errp)
{
    BDRVQcow2State *s = bs->opaque;
    int flags = s->flags;
    QCryptoBlock *crypto = NULL;
    QDict *options;
    Error *local_err = NULL;
    int ret;

    /*
     * Backing files are read-only which makes all of their metadata immutable,
     * that means we don't have to worry about reopening them here.
     */

    /* Detach the crypto state so qcow2_close() does not free it */
    crypto = s->crypto;
    s->crypto = NULL;

    qcow2_close(bs);

    memset(s, 0, sizeof(BDRVQcow2State));
    options = qdict_clone_shallow(bs->options);

    flags &= ~BDRV_O_INACTIVE;
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_do_open(bs, options, flags, &local_err);
    qemu_co_mutex_unlock(&s->lock);
    qobject_unref(options);
    if (local_err) {
        error_propagate_prepend(errp, local_err,
                                "Could not reopen qcow2 layer: ");
        bs->drv = NULL; /* node is unusable after a failed reopen */
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qcow2 layer");
        bs->drv = NULL;
        return;
    }

    /* Restore the preserved crypto state */
    s->crypto = crypto;
}
2747
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002748static size_t header_ext_add(char *buf, uint32_t magic, const void *s,
2749 size_t len, size_t buflen)
Kevin Wolf756e6732010-01-12 12:55:17 +01002750{
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002751 QCowExtension *ext_backing_fmt = (QCowExtension*) buf;
2752 size_t ext_len = sizeof(QCowExtension) + ((len + 7) & ~7);
Kevin Wolf756e6732010-01-12 12:55:17 +01002753
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002754 if (buflen < ext_len) {
Kevin Wolf756e6732010-01-12 12:55:17 +01002755 return -ENOSPC;
2756 }
2757
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002758 *ext_backing_fmt = (QCowExtension) {
2759 .magic = cpu_to_be32(magic),
2760 .len = cpu_to_be32(len),
2761 };
Stefan Hajnoczi0647d472016-09-13 09:56:27 +01002762
2763 if (len) {
2764 memcpy(buf + sizeof(QCowExtension), s, len);
2765 }
Kevin Wolf756e6732010-01-12 12:55:17 +01002766
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002767 return ext_len;
2768}
Kevin Wolf756e6732010-01-12 12:55:17 +01002769
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002770/*
2771 * Updates the qcow2 header, including the variable length parts of it, i.e.
2772 * the backing file name and all extensions. qcow2 was not designed to allow
2773 * such changes, so if we run out of space (we can only use the first cluster)
2774 * this function may fail.
2775 *
2776 * Returns 0 on success, -errno in error cases.
2777 */
2778int qcow2_update_header(BlockDriverState *bs)
2779{
Kevin Wolfff991292015-09-07 17:12:56 +02002780 BDRVQcow2State *s = bs->opaque;
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002781 QCowHeader *header;
2782 char *buf;
2783 size_t buflen = s->cluster_size;
2784 int ret;
2785 uint64_t total_size;
2786 uint32_t refcount_table_clusters;
Kevin Wolf6744cba2011-12-15 12:20:58 +01002787 size_t header_length;
Kevin Wolf75bab852012-02-02 14:52:08 +01002788 Qcow2UnknownHeaderExtension *uext;
Kevin Wolf756e6732010-01-12 12:55:17 +01002789
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002790 buf = qemu_blockalign(bs, buflen);
Kevin Wolf756e6732010-01-12 12:55:17 +01002791
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002792 /* Header structure */
2793 header = (QCowHeader*) buf;
2794
2795 if (buflen < sizeof(*header)) {
2796 ret = -ENOSPC;
2797 goto fail;
2798 }
2799
Kevin Wolf6744cba2011-12-15 12:20:58 +01002800 header_length = sizeof(*header) + s->unknown_header_fields_size;
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002801 total_size = bs->total_sectors * BDRV_SECTOR_SIZE;
2802 refcount_table_clusters = s->refcount_table_size >> (s->cluster_bits - 3);
2803
Denis Plotnikov572ad972020-05-07 11:25:18 +03002804 ret = validate_compression_type(s, NULL);
2805 if (ret) {
2806 goto fail;
2807 }
2808
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002809 *header = (QCowHeader) {
Kevin Wolf6744cba2011-12-15 12:20:58 +01002810 /* Version 2 fields */
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002811 .magic = cpu_to_be32(QCOW_MAGIC),
Kevin Wolf6744cba2011-12-15 12:20:58 +01002812 .version = cpu_to_be32(s->qcow_version),
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002813 .backing_file_offset = 0,
2814 .backing_file_size = 0,
2815 .cluster_bits = cpu_to_be32(s->cluster_bits),
2816 .size = cpu_to_be64(total_size),
2817 .crypt_method = cpu_to_be32(s->crypt_method_header),
2818 .l1_size = cpu_to_be32(s->l1_size),
2819 .l1_table_offset = cpu_to_be64(s->l1_table_offset),
2820 .refcount_table_offset = cpu_to_be64(s->refcount_table_offset),
2821 .refcount_table_clusters = cpu_to_be32(refcount_table_clusters),
2822 .nb_snapshots = cpu_to_be32(s->nb_snapshots),
2823 .snapshots_offset = cpu_to_be64(s->snapshots_offset),
Kevin Wolf6744cba2011-12-15 12:20:58 +01002824
2825 /* Version 3 fields */
2826 .incompatible_features = cpu_to_be64(s->incompatible_features),
2827 .compatible_features = cpu_to_be64(s->compatible_features),
2828 .autoclear_features = cpu_to_be64(s->autoclear_features),
Max Reitzb6481f32013-09-03 10:09:53 +02002829 .refcount_order = cpu_to_be32(s->refcount_order),
Kevin Wolf6744cba2011-12-15 12:20:58 +01002830 .header_length = cpu_to_be32(header_length),
Denis Plotnikov572ad972020-05-07 11:25:18 +03002831 .compression_type = s->compression_type,
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002832 };
2833
Kevin Wolf6744cba2011-12-15 12:20:58 +01002834 /* For older versions, write a shorter header */
2835 switch (s->qcow_version) {
2836 case 2:
2837 ret = offsetof(QCowHeader, incompatible_features);
2838 break;
2839 case 3:
2840 ret = sizeof(*header);
2841 break;
2842 default:
Jim Meyeringb6c14762012-05-21 13:06:54 +02002843 ret = -EINVAL;
2844 goto fail;
Kevin Wolf6744cba2011-12-15 12:20:58 +01002845 }
2846
2847 buf += ret;
2848 buflen -= ret;
2849 memset(buf, 0, buflen);
2850
2851 /* Preserve any unknown field in the header */
2852 if (s->unknown_header_fields_size) {
2853 if (buflen < s->unknown_header_fields_size) {
2854 ret = -ENOSPC;
2855 goto fail;
2856 }
2857
2858 memcpy(buf, s->unknown_header_fields, s->unknown_header_fields_size);
2859 buf += s->unknown_header_fields_size;
2860 buflen -= s->unknown_header_fields_size;
2861 }
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002862
2863 /* Backing file format header extension */
Kevin Wolfe4603fe2015-04-07 15:03:16 +02002864 if (s->image_backing_format) {
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002865 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BACKING_FORMAT,
Kevin Wolfe4603fe2015-04-07 15:03:16 +02002866 s->image_backing_format,
2867 strlen(s->image_backing_format),
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002868 buflen);
2869 if (ret < 0) {
2870 goto fail;
Kevin Wolf756e6732010-01-12 12:55:17 +01002871 }
2872
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002873 buf += ret;
2874 buflen -= ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01002875 }
2876
Kevin Wolf9b890bd2019-01-15 19:02:40 +01002877 /* External data file header extension */
2878 if (has_data_file(bs) && s->image_data_file) {
2879 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_DATA_FILE,
2880 s->image_data_file, strlen(s->image_data_file),
2881 buflen);
2882 if (ret < 0) {
2883 goto fail;
2884 }
2885
2886 buf += ret;
2887 buflen -= ret;
2888 }
2889
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01002890 /* Full disk encryption header pointer extension */
2891 if (s->crypto_header.offset != 0) {
Peter Maydell3b698f52018-10-09 18:24:59 +01002892 s->crypto_header.offset = cpu_to_be64(s->crypto_header.offset);
2893 s->crypto_header.length = cpu_to_be64(s->crypto_header.length);
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01002894 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_CRYPTO_HEADER,
2895 &s->crypto_header, sizeof(s->crypto_header),
2896 buflen);
Peter Maydell3b698f52018-10-09 18:24:59 +01002897 s->crypto_header.offset = be64_to_cpu(s->crypto_header.offset);
2898 s->crypto_header.length = be64_to_cpu(s->crypto_header.length);
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01002899 if (ret < 0) {
2900 goto fail;
2901 }
2902 buf += ret;
2903 buflen -= ret;
2904 }
2905
Eric Blakee7be13a2020-03-24 12:42:32 -05002906 /*
2907 * Feature table. A mere 8 feature names occupies 392 bytes, and
2908 * when coupled with the v3 minimum header of 104 bytes plus the
2909 * 8-byte end-of-extension marker, that would leave only 8 bytes
2910 * for a backing file name in an image with 512-byte clusters.
2911 * Thus, we choose to omit this header for cluster sizes 4k and
2912 * smaller.
2913 */
2914 if (s->qcow_version >= 3 && s->cluster_size > 4096) {
Eric Blakebb40ebc2020-03-24 12:42:31 -05002915 static const Qcow2Feature features[] = {
Kevin Wolf1a4828c2015-12-02 19:11:04 +01002916 {
2917 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2918 .bit = QCOW2_INCOMPAT_DIRTY_BITNR,
2919 .name = "dirty bit",
2920 },
2921 {
2922 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2923 .bit = QCOW2_INCOMPAT_CORRUPT_BITNR,
2924 .name = "corrupt bit",
2925 },
2926 {
Kevin Wolf93c24932019-01-14 16:48:25 +01002927 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2928 .bit = QCOW2_INCOMPAT_DATA_FILE_BITNR,
2929 .name = "external data file",
2930 },
2931 {
Denis Plotnikov572ad972020-05-07 11:25:18 +03002932 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2933 .bit = QCOW2_INCOMPAT_COMPRESSION_BITNR,
2934 .name = "compression type",
2935 },
2936 {
Alberto Garcia7be20252020-07-10 18:13:13 +02002937 .type = QCOW2_FEAT_TYPE_INCOMPATIBLE,
2938 .bit = QCOW2_INCOMPAT_EXTL2_BITNR,
2939 .name = "extended L2 entries",
2940 },
2941 {
Kevin Wolf1a4828c2015-12-02 19:11:04 +01002942 .type = QCOW2_FEAT_TYPE_COMPATIBLE,
2943 .bit = QCOW2_COMPAT_LAZY_REFCOUNTS_BITNR,
2944 .name = "lazy refcounts",
2945 },
Eric Blakebb40ebc2020-03-24 12:42:31 -05002946 {
2947 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
2948 .bit = QCOW2_AUTOCLEAR_BITMAPS_BITNR,
2949 .name = "bitmaps",
2950 },
2951 {
2952 .type = QCOW2_FEAT_TYPE_AUTOCLEAR,
2953 .bit = QCOW2_AUTOCLEAR_DATA_FILE_RAW_BITNR,
2954 .name = "raw external data",
2955 },
Kevin Wolf1a4828c2015-12-02 19:11:04 +01002956 };
Kevin Wolfcfcc4c62012-04-12 15:20:27 +02002957
Kevin Wolf1a4828c2015-12-02 19:11:04 +01002958 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_FEATURE_TABLE,
2959 features, sizeof(features), buflen);
2960 if (ret < 0) {
2961 goto fail;
2962 }
2963 buf += ret;
2964 buflen -= ret;
Kevin Wolfcfcc4c62012-04-12 15:20:27 +02002965 }
Kevin Wolfcfcc4c62012-04-12 15:20:27 +02002966
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +03002967 /* Bitmap extension */
2968 if (s->nb_bitmaps > 0) {
2969 Qcow2BitmapHeaderExt bitmaps_header = {
2970 .nb_bitmaps = cpu_to_be32(s->nb_bitmaps),
2971 .bitmap_directory_size =
2972 cpu_to_be64(s->bitmap_directory_size),
2973 .bitmap_directory_offset =
2974 cpu_to_be64(s->bitmap_directory_offset)
2975 };
2976 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_BITMAPS,
2977 &bitmaps_header, sizeof(bitmaps_header),
2978 buflen);
2979 if (ret < 0) {
2980 goto fail;
2981 }
2982 buf += ret;
2983 buflen -= ret;
2984 }
2985
Kevin Wolf75bab852012-02-02 14:52:08 +01002986 /* Keep unknown header extensions */
2987 QLIST_FOREACH(uext, &s->unknown_header_ext, next) {
2988 ret = header_ext_add(buf, uext->magic, uext->data, uext->len, buflen);
2989 if (ret < 0) {
2990 goto fail;
2991 }
2992
2993 buf += ret;
2994 buflen -= ret;
2995 }
2996
Kevin Wolfe24e49e2012-02-02 12:32:31 +01002997 /* End of header extensions */
2998 ret = header_ext_add(buf, QCOW2_EXT_MAGIC_END, NULL, 0, buflen);
Kevin Wolf756e6732010-01-12 12:55:17 +01002999 if (ret < 0) {
3000 goto fail;
3001 }
3002
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003003 buf += ret;
3004 buflen -= ret;
Kevin Wolf756e6732010-01-12 12:55:17 +01003005
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003006 /* Backing file name */
Kevin Wolfe4603fe2015-04-07 15:03:16 +02003007 if (s->image_backing_file) {
3008 size_t backing_file_len = strlen(s->image_backing_file);
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003009
3010 if (buflen < backing_file_len) {
3011 ret = -ENOSPC;
3012 goto fail;
3013 }
3014
Jim Meyering00ea1882012-10-04 13:10:01 +02003015 /* Using strncpy is ok here, since buf is not NUL-terminated. */
Kevin Wolfe4603fe2015-04-07 15:03:16 +02003016 strncpy(buf, s->image_backing_file, buflen);
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003017
3018 header->backing_file_offset = cpu_to_be64(buf - ((char*) header));
3019 header->backing_file_size = cpu_to_be32(backing_file_len);
Kevin Wolf756e6732010-01-12 12:55:17 +01003020 }
3021
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003022 /* Write the new header */
Kevin Wolfd9ca2ea2016-06-20 20:09:15 +02003023 ret = bdrv_pwrite(bs->file, 0, header, s->cluster_size);
Kevin Wolf756e6732010-01-12 12:55:17 +01003024 if (ret < 0) {
3025 goto fail;
3026 }
3027
3028 ret = 0;
3029fail:
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003030 qemu_vfree(header);
Kevin Wolf756e6732010-01-12 12:55:17 +01003031 return ret;
3032}
3033
3034static int qcow2_change_backing_file(BlockDriverState *bs,
3035 const char *backing_file, const char *backing_fmt)
3036{
Kevin Wolfff991292015-09-07 17:12:56 +02003037 BDRVQcow2State *s = bs->opaque;
Kevin Wolfe4603fe2015-04-07 15:03:16 +02003038
Kevin Wolf6c3944d2019-02-22 14:29:38 +01003039 /* Adding a backing file means that the external data file alone won't be
3040 * enough to make sense of the content */
3041 if (backing_file && data_file_is_raw(bs)) {
3042 return -EINVAL;
3043 }
3044
Max Reitz4e876bc2016-04-06 18:32:48 +02003045 if (backing_file && strlen(backing_file) > 1023) {
3046 return -EINVAL;
3047 }
3048
Max Reitz998c2012019-02-01 20:29:08 +01003049 pstrcpy(bs->auto_backing_file, sizeof(bs->auto_backing_file),
3050 backing_file ?: "");
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003051 pstrcpy(bs->backing_file, sizeof(bs->backing_file), backing_file ?: "");
3052 pstrcpy(bs->backing_format, sizeof(bs->backing_format), backing_fmt ?: "");
3053
Kevin Wolfe4603fe2015-04-07 15:03:16 +02003054 g_free(s->image_backing_file);
3055 g_free(s->image_backing_format);
3056
3057 s->image_backing_file = backing_file ? g_strdup(bs->backing_file) : NULL;
3058 s->image_backing_format = backing_fmt ? g_strdup(bs->backing_format) : NULL;
3059
Kevin Wolfe24e49e2012-02-02 12:32:31 +01003060 return qcow2_update_header(bs);
Kevin Wolf756e6732010-01-12 12:55:17 +01003061}
3062
Kevin Wolf60900b72018-01-10 17:55:16 +01003063static int qcow2_set_up_encryption(BlockDriverState *bs,
3064 QCryptoBlockCreateOptions *cryptoopts,
3065 Error **errp)
3066{
3067 BDRVQcow2State *s = bs->opaque;
3068 QCryptoBlock *crypto = NULL;
3069 int fmt, ret;
3070
3071 switch (cryptoopts->format) {
3072 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
3073 fmt = QCOW_CRYPT_LUKS;
3074 break;
3075 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
3076 fmt = QCOW_CRYPT_AES;
3077 break;
3078 default:
3079 error_setg(errp, "Crypto format not supported in qcow2");
3080 return -EINVAL;
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003081 }
Kevin Wolf60900b72018-01-10 17:55:16 +01003082
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01003083 s->crypt_method_header = fmt;
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003084
Daniel P. Berrange1cd9a782017-06-23 17:24:17 +01003085 crypto = qcrypto_block_create(cryptoopts, "encrypt.",
Daniel P. Berrange4652b8f2017-06-23 17:24:12 +01003086 qcow2_crypto_hdr_init_func,
3087 qcow2_crypto_hdr_write_func,
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003088 bs, errp);
3089 if (!crypto) {
Kevin Wolf60900b72018-01-10 17:55:16 +01003090 return -EINVAL;
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003091 }
3092
3093 ret = qcow2_update_header(bs);
3094 if (ret < 0) {
3095 error_setg_errno(errp, -ret, "Could not write encryption header");
3096 goto out;
3097 }
3098
Kevin Wolf60900b72018-01-10 17:55:16 +01003099 ret = 0;
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003100 out:
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003101 qcrypto_block_free(crypto);
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003102 return ret;
3103}
3104
Max Reitz7bc45dc2017-06-13 22:21:00 +02003105/**
3106 * Preallocates metadata structures for data clusters between @offset (in the
3107 * guest disk) and @new_length (which is thus generally the new guest disk
3108 * size).
3109 *
3110 * Returns: 0 on success, -errno on failure.
3111 */
Kevin Wolf47e86b82018-06-26 15:52:13 +02003112static int coroutine_fn preallocate_co(BlockDriverState *bs, uint64_t offset,
Kevin Wolf718c0fc2019-04-15 16:34:30 +02003113 uint64_t new_length, PreallocMode mode,
3114 Error **errp)
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003115{
Kevin Wolf93e32b32019-04-15 17:54:50 +02003116 BDRVQcow2State *s = bs->opaque;
Kevin Wolfd46a0bb2016-06-01 16:55:05 +02003117 uint64_t bytes;
Kevin Wolf060bee82012-12-07 18:08:45 +01003118 uint64_t host_offset = 0;
Kevin Wolf718c0fc2019-04-15 16:34:30 +02003119 int64_t file_length;
Kevin Wolfd46a0bb2016-06-01 16:55:05 +02003120 unsigned int cur_bytes;
Kevin Wolf148da7e2010-01-20 15:03:01 +01003121 int ret;
Alberto Garcia1a52b732020-09-08 16:08:27 +02003122 QCowL2Meta *meta = NULL, *m;
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003123
Max Reitz7bc45dc2017-06-13 22:21:00 +02003124 assert(offset <= new_length);
3125 bytes = new_length - offset;
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003126
Kevin Wolfd46a0bb2016-06-01 16:55:05 +02003127 while (bytes) {
Kevin Wolff29fbf72019-04-15 16:25:01 +02003128 cur_bytes = MIN(bytes, QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size));
Alberto Garciabfd09892020-09-11 16:09:42 +02003129 ret = qcow2_alloc_host_offset(bs, offset, &cur_bytes,
3130 &host_offset, &meta);
Kevin Wolf148da7e2010-01-20 15:03:01 +01003131 if (ret < 0) {
Kevin Wolf360bd072019-04-15 16:56:07 +02003132 error_setg_errno(errp, -ret, "Allocating clusters failed");
Alberto Garcia1a52b732020-09-08 16:08:27 +02003133 goto out;
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003134 }
3135
Alberto Garcia1a52b732020-09-08 16:08:27 +02003136 for (m = meta; m != NULL; m = m->next) {
3137 m->prealloc = true;
3138 }
Stefan Hajnoczic7927072014-04-01 11:12:57 +02003139
Alberto Garcia1a52b732020-09-08 16:08:27 +02003140 ret = qcow2_handle_l2meta(bs, &meta, true);
3141 if (ret < 0) {
3142 error_setg_errno(errp, -ret, "Mapping clusters failed");
3143 goto out;
Kevin Wolff50f88b2012-12-07 18:08:46 +01003144 }
Kevin Wolff2149782009-08-31 16:48:49 +02003145
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003146 /* TODO Preallocate data if requested */
3147
Kevin Wolfd46a0bb2016-06-01 16:55:05 +02003148 bytes -= cur_bytes;
3149 offset += cur_bytes;
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003150 }
3151
3152 /*
3153 * It is expected that the image file is large enough to actually contain
3154 * all of the allocated clusters (otherwise we get failing reads after
3155 * EOF). Extend the image to the last allocated sector.
3156 */
Kevin Wolf718c0fc2019-04-15 16:34:30 +02003157 file_length = bdrv_getlength(s->data_file->bs);
3158 if (file_length < 0) {
3159 error_setg_errno(errp, -file_length, "Could not get file size");
Alberto Garcia1a52b732020-09-08 16:08:27 +02003160 ret = file_length;
3161 goto out;
Kevin Wolf718c0fc2019-04-15 16:34:30 +02003162 }
3163
3164 if (host_offset + cur_bytes > file_length) {
3165 if (mode == PREALLOC_MODE_METADATA) {
3166 mode = PREALLOC_MODE_OFF;
3167 }
Max Reitzc80d8b02019-09-18 11:51:40 +02003168 ret = bdrv_co_truncate(s->data_file, host_offset + cur_bytes, false,
Kevin Wolf7b8e4852020-04-24 14:54:40 +02003169 mode, 0, errp);
Kevin Wolf19dbcbf2010-06-22 16:59:46 +02003170 if (ret < 0) {
Alberto Garcia1a52b732020-09-08 16:08:27 +02003171 goto out;
Kevin Wolf19dbcbf2010-06-22 16:59:46 +02003172 }
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003173 }
3174
Alberto Garcia1a52b732020-09-08 16:08:27 +02003175 ret = 0;
3176
3177out:
3178 qcow2_handle_l2meta(bs, &meta, false);
3179 return ret;
Kevin Wolfa35e1c12009-08-17 15:50:10 +02003180}
3181
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003182/* qcow2_refcount_metadata_size:
3183 * @clusters: number of clusters to refcount (including data and L1/L2 tables)
3184 * @cluster_size: size of a cluster, in bytes
3185 * @refcount_order: refcount bits power-of-2 exponent
Max Reitz12cc30a2017-06-13 22:21:03 +02003186 * @generous_increase: allow for the refcount table to be 1.5x as large as it
3187 * needs to be
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003188 *
3189 * Returns: Number of bytes required for refcount blocks and table metadata.
3190 */
Max Reitz12cc30a2017-06-13 22:21:03 +02003191int64_t qcow2_refcount_metadata_size(int64_t clusters, size_t cluster_size,
3192 int refcount_order, bool generous_increase,
3193 uint64_t *refblock_count)
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003194{
3195 /*
3196 * Every host cluster is reference-counted, including metadata (even
3197 * refcount metadata is recursively included).
3198 *
3199 * An accurate formula for the size of refcount metadata size is difficult
3200 * to derive. An easier method of calculation is finding the fixed point
3201 * where no further refcount blocks or table clusters are required to
3202 * reference count every cluster.
3203 */
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02003204 int64_t blocks_per_table_cluster = cluster_size / REFTABLE_ENTRY_SIZE;
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003205 int64_t refcounts_per_block = cluster_size * 8 / (1 << refcount_order);
3206 int64_t table = 0; /* number of refcount table clusters */
3207 int64_t blocks = 0; /* number of refcount block clusters */
3208 int64_t last;
3209 int64_t n = 0;
3210
3211 do {
3212 last = n;
3213 blocks = DIV_ROUND_UP(clusters + table + blocks, refcounts_per_block);
3214 table = DIV_ROUND_UP(blocks, blocks_per_table_cluster);
3215 n = clusters + blocks + table;
Max Reitz12cc30a2017-06-13 22:21:03 +02003216
3217 if (n == last && generous_increase) {
3218 clusters += DIV_ROUND_UP(table, 2);
3219 n = 0; /* force another loop */
3220 generous_increase = false;
3221 }
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003222 } while (n != last);
3223
Max Reitz12cc30a2017-06-13 22:21:03 +02003224 if (refblock_count) {
3225 *refblock_count = blocks;
3226 }
3227
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003228 return (blocks + table) * cluster_size;
3229}
3230
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003231/**
3232 * qcow2_calc_prealloc_size:
3233 * @total_size: virtual disk size in bytes
3234 * @cluster_size: cluster size in bytes
3235 * @refcount_order: refcount bits power-of-2 exponent
Alberto Garcia0dd07b22020-07-10 18:13:11 +02003236 * @extended_l2: true if the image has extended L2 entries
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003237 *
3238 * Returns: Total number of bytes required for the fully allocated image
3239 * (including metadata).
3240 */
3241static int64_t qcow2_calc_prealloc_size(int64_t total_size,
3242 size_t cluster_size,
Alberto Garcia0dd07b22020-07-10 18:13:11 +02003243 int refcount_order,
3244 bool extended_l2)
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003245{
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003246 int64_t meta_size = 0;
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003247 uint64_t nl1e, nl2e;
Alberto Garcia9e029682018-02-15 15:10:08 +02003248 int64_t aligned_total_size = ROUND_UP(total_size, cluster_size);
Alberto Garcia0dd07b22020-07-10 18:13:11 +02003249 size_t l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003250
3251 /* header: 1 cluster */
3252 meta_size += cluster_size;
3253
3254 /* total size of L2 tables */
3255 nl2e = aligned_total_size / cluster_size;
Alberto Garcia0dd07b22020-07-10 18:13:11 +02003256 nl2e = ROUND_UP(nl2e, cluster_size / l2e_size);
3257 meta_size += nl2e * l2e_size;
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003258
3259 /* total size of L1 tables */
Alberto Garcia0dd07b22020-07-10 18:13:11 +02003260 nl1e = nl2e * l2e_size / cluster_size;
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02003261 nl1e = ROUND_UP(nl1e, cluster_size / L1E_SIZE);
3262 meta_size += nl1e * L1E_SIZE;
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003263
Stefan Hajnoczi7c5bcc42017-07-05 13:57:33 +01003264 /* total size of refcount table and blocks */
3265 meta_size += qcow2_refcount_metadata_size(
3266 (meta_size + aligned_total_size) / cluster_size,
Max Reitz12cc30a2017-06-13 22:21:03 +02003267 cluster_size, refcount_order, false, NULL);
Stefan Hajnoczi95c67e32017-07-05 13:57:32 +01003268
3269 return meta_size + aligned_total_size;
3270}
3271
Alberto Garcia7be20252020-07-10 18:13:13 +02003272static bool validate_cluster_size(size_t cluster_size, bool extended_l2,
3273 Error **errp)
Kevin Wolfa9420732010-06-11 21:37:37 +02003274{
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003275 int cluster_bits = ctz32(cluster_size);
Kevin Wolfa9420732010-06-11 21:37:37 +02003276 if (cluster_bits < MIN_CLUSTER_BITS || cluster_bits > MAX_CLUSTER_BITS ||
3277 (1 << cluster_bits) != cluster_size)
3278 {
Max Reitz3ef6c402013-09-05 09:40:43 +02003279 error_setg(errp, "Cluster size must be a power of two between %d and "
3280 "%dk", 1 << MIN_CLUSTER_BITS, 1 << (MAX_CLUSTER_BITS - 10));
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003281 return false;
3282 }
Alberto Garcia7be20252020-07-10 18:13:13 +02003283
3284 if (extended_l2) {
3285 unsigned min_cluster_size =
3286 (1 << MIN_CLUSTER_BITS) * QCOW_EXTL2_SUBCLUSTERS_PER_CLUSTER;
3287 if (cluster_size < min_cluster_size) {
3288 error_setg(errp, "Extended L2 entries are only supported with "
3289 "cluster sizes of at least %u bytes", min_cluster_size);
3290 return false;
3291 }
3292 }
3293
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003294 return true;
3295}
3296
Alberto Garcia7be20252020-07-10 18:13:13 +02003297static size_t qcow2_opt_get_cluster_size_del(QemuOpts *opts, bool extended_l2,
3298 Error **errp)
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003299{
3300 size_t cluster_size;
3301
3302 cluster_size = qemu_opt_get_size_del(opts, BLOCK_OPT_CLUSTER_SIZE,
3303 DEFAULT_CLUSTER_SIZE);
Alberto Garcia7be20252020-07-10 18:13:13 +02003304 if (!validate_cluster_size(cluster_size, extended_l2, errp)) {
Stefan Hajnoczi0eb4a8c2017-07-05 13:57:34 +01003305 return 0;
Kevin Wolfa9420732010-06-11 21:37:37 +02003306 }
Stefan Hajnoczi0eb4a8c2017-07-05 13:57:34 +01003307 return cluster_size;
3308}
3309
3310static int qcow2_opt_get_version_del(QemuOpts *opts, Error **errp)
3311{
3312 char *buf;
3313 int ret;
3314
3315 buf = qemu_opt_get_del(opts, BLOCK_OPT_COMPAT_LEVEL);
3316 if (!buf) {
3317 ret = 3; /* default */
3318 } else if (!strcmp(buf, "0.10")) {
3319 ret = 2;
3320 } else if (!strcmp(buf, "1.1")) {
3321 ret = 3;
3322 } else {
3323 error_setg(errp, "Invalid compatibility level: '%s'", buf);
3324 ret = -EINVAL;
3325 }
3326 g_free(buf);
3327 return ret;
3328}
3329
3330static uint64_t qcow2_opt_get_refcount_bits_del(QemuOpts *opts, int version,
3331 Error **errp)
3332{
3333 uint64_t refcount_bits;
3334
3335 refcount_bits = qemu_opt_get_number_del(opts, BLOCK_OPT_REFCOUNT_BITS, 16);
3336 if (refcount_bits > 64 || !is_power_of_2(refcount_bits)) {
3337 error_setg(errp, "Refcount width must be a power of two and may not "
3338 "exceed 64 bits");
3339 return 0;
3340 }
3341
3342 if (version < 3 && refcount_bits != 16) {
3343 error_setg(errp, "Different refcount widths than 16 bits require "
3344 "compatibility level 1.1 or above (use compat=1.1 or "
3345 "greater)");
3346 return 0;
3347 }
3348
3349 return refcount_bits;
3350}
3351
Stefan Hajnoczic2743932018-01-18 13:43:46 +01003352static int coroutine_fn
Kevin Wolf60900b72018-01-10 17:55:16 +01003353qcow2_co_create(BlockdevCreateOptions *create_options, Error **errp)
Stefan Hajnoczi0eb4a8c2017-07-05 13:57:34 +01003354{
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003355 BlockdevCreateOptionsQcow2 *qcow2_opts;
Stefan Hajnoczi0eb4a8c2017-07-05 13:57:34 +01003356 QDict *options;
Kevin Wolfa9420732010-06-11 21:37:37 +02003357
3358 /*
3359 * Open the image file and write a minimal qcow2 header.
3360 *
3361 * We keep things simple and start with a zero-sized image. We also
3362 * do without refcount blocks or a L1 table for now. We'll fix the
3363 * inconsistency later.
3364 *
3365 * We do need a refcount table because growing the refcount table means
Eric Blakea951a632020-03-24 12:42:30 -05003366 * allocating two new refcount blocks - the second of which would be at
Kevin Wolfa9420732010-06-11 21:37:37 +02003367 * 2 GB for 64k clusters, and we don't want to have a 2 GB initial file
3368 * size for any qcow2 image.
3369 */
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003370 BlockBackend *blk = NULL;
3371 BlockDriverState *bs = NULL;
Kevin Wolfdcc98682019-01-14 16:57:27 +01003372 BlockDriverState *data_bs = NULL;
Kevin Wolff8413b32013-12-04 11:06:36 +01003373 QCowHeader *header;
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003374 size_t cluster_size;
3375 int version;
3376 int refcount_order;
Kevin Wolfb106ad92014-03-28 18:06:31 +01003377 uint64_t* refcount_table;
Kevin Wolfa9420732010-06-11 21:37:37 +02003378 int ret;
Denis Plotnikov572ad972020-05-07 11:25:18 +03003379 uint8_t compression_type = QCOW2_COMPRESSION_TYPE_ZLIB;
Kevin Wolfa9420732010-06-11 21:37:37 +02003380
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003381 assert(create_options->driver == BLOCKDEV_DRIVER_QCOW2);
3382 qcow2_opts = &create_options->u.qcow2;
3383
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003384 bs = bdrv_open_blockdev_ref(qcow2_opts->file, errp);
3385 if (bs == NULL) {
3386 return -EIO;
3387 }
3388
3389 /* Validate options and set default values */
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003390 if (!QEMU_IS_ALIGNED(qcow2_opts->size, BDRV_SECTOR_SIZE)) {
Alberto Garcia3afea402020-01-18 20:09:30 +01003391 error_setg(errp, "Image size must be a multiple of %u bytes",
3392 (unsigned) BDRV_SECTOR_SIZE);
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003393 ret = -EINVAL;
3394 goto out;
3395 }
3396
3397 if (qcow2_opts->has_version) {
3398 switch (qcow2_opts->version) {
3399 case BLOCKDEV_QCOW2_VERSION_V2:
3400 version = 2;
3401 break;
3402 case BLOCKDEV_QCOW2_VERSION_V3:
3403 version = 3;
3404 break;
3405 default:
3406 g_assert_not_reached();
3407 }
3408 } else {
3409 version = 3;
3410 }
3411
3412 if (qcow2_opts->has_cluster_size) {
3413 cluster_size = qcow2_opts->cluster_size;
3414 } else {
3415 cluster_size = DEFAULT_CLUSTER_SIZE;
3416 }
3417
Alberto Garcia7be20252020-07-10 18:13:13 +02003418 if (!qcow2_opts->has_extended_l2) {
3419 qcow2_opts->extended_l2 = false;
3420 }
3421 if (qcow2_opts->extended_l2) {
3422 if (version < 3) {
3423 error_setg(errp, "Extended L2 entries are only supported with "
3424 "compatibility level 1.1 and above (use version=v3 or "
3425 "greater)");
3426 ret = -EINVAL;
3427 goto out;
3428 }
3429 }
3430
3431 if (!validate_cluster_size(cluster_size, qcow2_opts->extended_l2, errp)) {
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003432 ret = -EINVAL;
3433 goto out;
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003434 }
3435
3436 if (!qcow2_opts->has_preallocation) {
3437 qcow2_opts->preallocation = PREALLOC_MODE_OFF;
3438 }
3439 if (qcow2_opts->has_backing_file &&
Alberto Garcia21187712020-07-10 18:13:14 +02003440 qcow2_opts->preallocation != PREALLOC_MODE_OFF &&
3441 !qcow2_opts->extended_l2)
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003442 {
Alberto Garcia21187712020-07-10 18:13:14 +02003443 error_setg(errp, "Backing file and preallocation can only be used at "
3444 "the same time if extended_l2 is on");
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003445 ret = -EINVAL;
3446 goto out;
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003447 }
3448 if (qcow2_opts->has_backing_fmt && !qcow2_opts->has_backing_file) {
3449 error_setg(errp, "Backing format cannot be used without backing file");
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003450 ret = -EINVAL;
3451 goto out;
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003452 }
3453
3454 if (!qcow2_opts->has_lazy_refcounts) {
3455 qcow2_opts->lazy_refcounts = false;
3456 }
3457 if (version < 3 && qcow2_opts->lazy_refcounts) {
3458 error_setg(errp, "Lazy refcounts only supported with compatibility "
Kevin Wolfb76b4f62018-01-11 16:18:08 +01003459 "level 1.1 and above (use version=v3 or greater)");
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003460 ret = -EINVAL;
3461 goto out;
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003462 }
3463
3464 if (!qcow2_opts->has_refcount_bits) {
3465 qcow2_opts->refcount_bits = 16;
3466 }
3467 if (qcow2_opts->refcount_bits > 64 ||
3468 !is_power_of_2(qcow2_opts->refcount_bits))
3469 {
3470 error_setg(errp, "Refcount width must be a power of two and may not "
3471 "exceed 64 bits");
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003472 ret = -EINVAL;
3473 goto out;
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003474 }
3475 if (version < 3 && qcow2_opts->refcount_bits != 16) {
3476 error_setg(errp, "Different refcount widths than 16 bits require "
Kevin Wolfb76b4f62018-01-11 16:18:08 +01003477 "compatibility level 1.1 or above (use version=v3 or "
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003478 "greater)");
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003479 ret = -EINVAL;
3480 goto out;
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003481 }
3482 refcount_order = ctz32(qcow2_opts->refcount_bits);
3483
Kevin Wolf6c3944d2019-02-22 14:29:38 +01003484 if (qcow2_opts->data_file_raw && !qcow2_opts->data_file) {
3485 error_setg(errp, "data-file-raw requires data-file");
3486 ret = -EINVAL;
3487 goto out;
3488 }
3489 if (qcow2_opts->data_file_raw && qcow2_opts->has_backing_file) {
3490 error_setg(errp, "Backing file and data-file-raw cannot be used at "
3491 "the same time");
3492 ret = -EINVAL;
3493 goto out;
3494 }
3495
Kevin Wolfdcc98682019-01-14 16:57:27 +01003496 if (qcow2_opts->data_file) {
3497 if (version < 3) {
3498 error_setg(errp, "External data files are only supported with "
3499 "compatibility level 1.1 and above (use version=v3 or "
3500 "greater)");
3501 ret = -EINVAL;
3502 goto out;
3503 }
3504 data_bs = bdrv_open_blockdev_ref(qcow2_opts->data_file, errp);
Kevin Wolfa0cf8362019-03-13 15:22:38 +01003505 if (data_bs == NULL) {
Kevin Wolfdcc98682019-01-14 16:57:27 +01003506 ret = -EIO;
3507 goto out;
3508 }
3509 }
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003510
Denis Plotnikov572ad972020-05-07 11:25:18 +03003511 if (qcow2_opts->has_compression_type &&
3512 qcow2_opts->compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3513
3514 ret = -EINVAL;
3515
3516 if (version < 3) {
3517 error_setg(errp, "Non-zlib compression type is only supported with "
3518 "compatibility level 1.1 and above (use version=v3 or "
3519 "greater)");
3520 goto out;
3521 }
3522
3523 switch (qcow2_opts->compression_type) {
Denis Plotnikovd298ac12020-05-07 11:25:20 +03003524#ifdef CONFIG_ZSTD
3525 case QCOW2_COMPRESSION_TYPE_ZSTD:
3526 break;
3527#endif
Denis Plotnikov572ad972020-05-07 11:25:18 +03003528 default:
3529 error_setg(errp, "Unknown compression type");
3530 goto out;
3531 }
3532
3533 compression_type = qcow2_opts->compression_type;
3534 }
3535
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003536 /* Create BlockBackend to write to the image */
Eric Blakea3aeeab2020-04-28 14:26:46 -05003537 blk = blk_new_with_bs(bs, BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL,
3538 errp);
3539 if (!blk) {
3540 ret = -EPERM;
Kevin Wolfcbf2b7c2018-01-09 17:35:26 +01003541 goto out;
Kevin Wolfa9420732010-06-11 21:37:37 +02003542 }
Kevin Wolf23588792016-03-08 15:57:05 +01003543 blk_set_allow_write_beyond_eof(blk, true);
3544
Kevin Wolfa9420732010-06-11 21:37:37 +02003545 /* Write the header */
Kevin Wolff8413b32013-12-04 11:06:36 +01003546 QEMU_BUILD_BUG_ON((1 << MIN_CLUSTER_BITS) < sizeof(*header));
3547 header = g_malloc0(cluster_size);
3548 *header = (QCowHeader) {
3549 .magic = cpu_to_be32(QCOW_MAGIC),
3550 .version = cpu_to_be32(version),
Stefan Hajnoczi0eb4a8c2017-07-05 13:57:34 +01003551 .cluster_bits = cpu_to_be32(ctz32(cluster_size)),
Kevin Wolff8413b32013-12-04 11:06:36 +01003552 .size = cpu_to_be64(0),
3553 .l1_table_offset = cpu_to_be64(0),
3554 .l1_size = cpu_to_be32(0),
3555 .refcount_table_offset = cpu_to_be64(cluster_size),
3556 .refcount_table_clusters = cpu_to_be32(1),
Max Reitzbd4b1672015-02-18 17:40:46 -05003557 .refcount_order = cpu_to_be32(refcount_order),
Denis Plotnikov572ad972020-05-07 11:25:18 +03003558 /* don't deal with endianness since compression_type is 1 byte long */
3559 .compression_type = compression_type,
Kevin Wolff8413b32013-12-04 11:06:36 +01003560 .header_length = cpu_to_be32(sizeof(*header)),
3561 };
Kevin Wolfa9420732010-06-11 21:37:37 +02003562
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003563 /* We'll update this to correct value later */
3564 header->crypt_method = cpu_to_be32(QCOW_CRYPT_NONE);
Kevin Wolfa9420732010-06-11 21:37:37 +02003565
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003566 if (qcow2_opts->lazy_refcounts) {
Kevin Wolff8413b32013-12-04 11:06:36 +01003567 header->compatible_features |=
Stefan Hajnoczibfe80432012-07-27 09:05:22 +01003568 cpu_to_be64(QCOW2_COMPAT_LAZY_REFCOUNTS);
3569 }
Kevin Wolfdcc98682019-01-14 16:57:27 +01003570 if (data_bs) {
3571 header->incompatible_features |=
3572 cpu_to_be64(QCOW2_INCOMPAT_DATA_FILE);
3573 }
Kevin Wolf6c3944d2019-02-22 14:29:38 +01003574 if (qcow2_opts->data_file_raw) {
3575 header->autoclear_features |=
3576 cpu_to_be64(QCOW2_AUTOCLEAR_DATA_FILE_RAW);
3577 }
Denis Plotnikov572ad972020-05-07 11:25:18 +03003578 if (compression_type != QCOW2_COMPRESSION_TYPE_ZLIB) {
3579 header->incompatible_features |=
3580 cpu_to_be64(QCOW2_INCOMPAT_COMPRESSION);
3581 }
Stefan Hajnoczibfe80432012-07-27 09:05:22 +01003582
Alberto Garcia7be20252020-07-10 18:13:13 +02003583 if (qcow2_opts->extended_l2) {
3584 header->incompatible_features |=
3585 cpu_to_be64(QCOW2_INCOMPAT_EXTL2);
3586 }
3587
Eric Blake8341f002016-05-06 10:26:27 -06003588 ret = blk_pwrite(blk, 0, header, cluster_size, 0);
Kevin Wolff8413b32013-12-04 11:06:36 +01003589 g_free(header);
Kevin Wolfa9420732010-06-11 21:37:37 +02003590 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02003591 error_setg_errno(errp, -ret, "Could not write qcow2 header");
Kevin Wolfa9420732010-06-11 21:37:37 +02003592 goto out;
3593 }
3594
Kevin Wolfb106ad92014-03-28 18:06:31 +01003595 /* Write a refcount table with one refcount block */
3596 refcount_table = g_malloc0(2 * cluster_size);
3597 refcount_table[0] = cpu_to_be64(2 * cluster_size);
Eric Blake8341f002016-05-06 10:26:27 -06003598 ret = blk_pwrite(blk, cluster_size, refcount_table, 2 * cluster_size, 0);
Anthony Liguori7267c092011-08-20 22:09:37 -05003599 g_free(refcount_table);
Kevin Wolfa9420732010-06-11 21:37:37 +02003600
3601 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02003602 error_setg_errno(errp, -ret, "Could not write refcount table");
Kevin Wolfa9420732010-06-11 21:37:37 +02003603 goto out;
3604 }
3605
Kevin Wolf23588792016-03-08 15:57:05 +01003606 blk_unref(blk);
3607 blk = NULL;
Kevin Wolfa9420732010-06-11 21:37:37 +02003608
3609 /*
3610 * And now open the image and make it consistent first (i.e. increase the
3611 * refcount of the cluster that is occupied by the header and the refcount
3612 * table)
3613 */
Max Reitze6641712015-08-26 19:47:48 +02003614 options = qdict_new();
Eric Blake46f5ac22017-04-27 16:58:17 -05003615 qdict_put_str(options, "driver", "qcow2");
Kevin Wolfcbf2b7c2018-01-09 17:35:26 +01003616 qdict_put_str(options, "file", bs->node_name);
Kevin Wolfdcc98682019-01-14 16:57:27 +01003617 if (data_bs) {
3618 qdict_put_str(options, "data-file", data_bs->node_name);
3619 }
Kevin Wolfcbf2b7c2018-01-09 17:35:26 +01003620 blk = blk_new_open(NULL, NULL, options,
Kevin Wolf55880602017-02-17 15:07:38 +01003621 BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_NO_FLUSH,
Markus Armbrusteraf175e82020-07-07 18:06:03 +02003622 errp);
Kevin Wolf23588792016-03-08 15:57:05 +01003623 if (blk == NULL) {
Kevin Wolf23588792016-03-08 15:57:05 +01003624 ret = -EIO;
Kevin Wolfa9420732010-06-11 21:37:37 +02003625 goto out;
3626 }
3627
Kevin Wolf23588792016-03-08 15:57:05 +01003628 ret = qcow2_alloc_clusters(blk_bs(blk), 3 * cluster_size);
Kevin Wolfa9420732010-06-11 21:37:37 +02003629 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02003630 error_setg_errno(errp, -ret, "Could not allocate clusters for qcow2 "
3631 "header and refcount table");
Kevin Wolfa9420732010-06-11 21:37:37 +02003632 goto out;
3633
3634 } else if (ret != 0) {
3635 error_report("Huh, first cluster in empty image is already in use?");
3636 abort();
3637 }
3638
Kevin Wolf9b890bd2019-01-15 19:02:40 +01003639 /* Set the external data file if necessary */
3640 if (data_bs) {
3641 BDRVQcow2State *s = blk_bs(blk)->opaque;
3642 s->image_data_file = g_strdup(data_bs->filename);
3643 }
3644
Kevin Wolfb527c9b2015-12-02 18:34:39 +01003645 /* Create a full header (including things like feature table) */
Kevin Wolf23588792016-03-08 15:57:05 +01003646 ret = qcow2_update_header(blk_bs(blk));
Kevin Wolfb527c9b2015-12-02 18:34:39 +01003647 if (ret < 0) {
3648 error_setg_errno(errp, -ret, "Could not update qcow2 header");
3649 goto out;
3650 }
3651
Kevin Wolfa9420732010-06-11 21:37:37 +02003652 /* Okay, now that we have a valid image, let's give it the right size */
Max Reitzc80d8b02019-09-18 11:51:40 +02003653 ret = blk_truncate(blk, qcow2_opts->size, false, qcow2_opts->preallocation,
Kevin Wolf8c6242b2020-04-24 14:54:41 +02003654 0, errp);
Kevin Wolfa9420732010-06-11 21:37:37 +02003655 if (ret < 0) {
Max Reitzed3d2ec2017-03-28 22:51:27 +02003656 error_prepend(errp, "Could not resize image: ");
Kevin Wolfa9420732010-06-11 21:37:37 +02003657 goto out;
3658 }
3659
Eric Blakea951a632020-03-24 12:42:30 -05003660 /* Want a backing file? There you go. */
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003661 if (qcow2_opts->has_backing_file) {
3662 const char *backing_format = NULL;
3663
3664 if (qcow2_opts->has_backing_fmt) {
3665 backing_format = BlockdevDriver_str(qcow2_opts->backing_fmt);
3666 }
3667
3668 ret = bdrv_change_backing_file(blk_bs(blk), qcow2_opts->backing_file,
Eric Blakee54ee1b2020-07-06 15:39:53 -05003669 backing_format, false);
Kevin Wolfa9420732010-06-11 21:37:37 +02003670 if (ret < 0) {
Max Reitz3ef6c402013-09-05 09:40:43 +02003671 error_setg_errno(errp, -ret, "Could not assign backing file '%s' "
Kevin Wolf29ca9e42018-01-09 19:44:33 +01003672 "with format '%s'", qcow2_opts->backing_file,
3673 backing_format);
Kevin Wolfa9420732010-06-11 21:37:37 +02003674 goto out;
3675 }
3676 }
3677
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003678 /* Want encryption? There you go. */
Kevin Wolf60900b72018-01-10 17:55:16 +01003679 if (qcow2_opts->has_encrypt) {
3680 ret = qcow2_set_up_encryption(blk_bs(blk), qcow2_opts->encrypt, errp);
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003681 if (ret < 0) {
3682 goto out;
3683 }
3684 }
3685
Kevin Wolf23588792016-03-08 15:57:05 +01003686 blk_unref(blk);
3687 blk = NULL;
Max Reitzba2ab2f2013-10-24 20:35:06 +02003688
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003689 /* Reopen the image without BDRV_O_NO_FLUSH to flush it before returning.
3690 * Using BDRV_O_NO_IO, since encryption is now setup we don't want to
3691 * have to setup decryption context. We're not doing any I/O on the top
3692 * level BlockDriverState, only lower layers, where BDRV_O_NO_IO does
3693 * not have effect.
3694 */
Max Reitze6641712015-08-26 19:47:48 +02003695 options = qdict_new();
Eric Blake46f5ac22017-04-27 16:58:17 -05003696 qdict_put_str(options, "driver", "qcow2");
Kevin Wolfcbf2b7c2018-01-09 17:35:26 +01003697 qdict_put_str(options, "file", bs->node_name);
Kevin Wolfdcc98682019-01-14 16:57:27 +01003698 if (data_bs) {
3699 qdict_put_str(options, "data-file", data_bs->node_name);
3700 }
Kevin Wolfcbf2b7c2018-01-09 17:35:26 +01003701 blk = blk_new_open(NULL, NULL, options,
Daniel P. Berrangeb25b3872017-06-23 17:24:10 +01003702 BDRV_O_RDWR | BDRV_O_NO_BACKING | BDRV_O_NO_IO,
Markus Armbrusteraf175e82020-07-07 18:06:03 +02003703 errp);
Kevin Wolf23588792016-03-08 15:57:05 +01003704 if (blk == NULL) {
Kevin Wolf23588792016-03-08 15:57:05 +01003705 ret = -EIO;
Max Reitzba2ab2f2013-10-24 20:35:06 +02003706 goto out;
3707 }
3708
Kevin Wolfa9420732010-06-11 21:37:37 +02003709 ret = 0;
3710out:
Kevin Wolfe1d74bc2018-01-10 15:52:33 +01003711 blk_unref(blk);
3712 bdrv_unref(bs);
Kevin Wolfdcc98682019-01-14 16:57:27 +01003713 bdrv_unref(data_bs);
Kevin Wolfa9420732010-06-11 21:37:37 +02003714 return ret;
3715}
Kevin Wolfde5f3f42010-05-07 12:43:45 +02003716
/*
 * Implementation of the .bdrv_co_create_opts callback for qcow2.
 *
 * Translates the legacy QemuOpts-based image creation parameters into a
 * BlockdevCreateOptions QAPI object, creates and opens the protocol-level
 * file (and, if requested, an external data file), and then delegates the
 * actual format-layer creation to qcow2_co_create().
 *
 * @drv:      the qcow2 driver (unused here; part of the callback signature)
 * @filename: protocol-layer filename of the image to create
 * @opts:     legacy creation options (size, backing file, encryption, ...)
 * @errp:     error out-parameter
 *
 * Returns 0 on success, negative errno on failure.
 */
static int coroutine_fn qcow2_co_create_opts(BlockDriver *drv,
                                             const char *filename,
                                             QemuOpts *opts,
                                             Error **errp)
{
    BlockdevCreateOptions *create_options = NULL;
    QDict *qdict;
    Visitor *v;
    BlockDriverState *bs = NULL;
    BlockDriverState *data_bs = NULL;
    const char *val;
    int ret;

    /* Only the keyval visitor supports the dotted syntax needed for
     * encryption, so go through a QDict before getting a QAPI type. Ignore
     * options meant for the protocol layer so that the visitor doesn't
     * complain. */
    qdict = qemu_opts_to_qdict_filtered(opts, NULL, bdrv_qcow2.create_opts,
                                        true);

    /* Handle encryption options: the legacy boolean "encrypt" option maps
     * to the "qcow" (AES) encryption format; "off" simply drops the key. */
    val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT);
    if (val && !strcmp(val, "on")) {
        qdict_put_str(qdict, BLOCK_OPT_ENCRYPT, "qcow");
    } else if (val && !strcmp(val, "off")) {
        qdict_del(qdict, BLOCK_OPT_ENCRYPT);
    }

    /* Legacy "aes" encryption format is what QAPI calls "qcow" */
    val = qdict_get_try_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT);
    if (val && !strcmp(val, "aes")) {
        qdict_put_str(qdict, BLOCK_OPT_ENCRYPT_FORMAT, "qcow");
    }

    /* Convert compat=0.10/1.1 into compat=v2/v3, to be renamed into
     * version=v2/v3 below. */
    val = qdict_get_try_str(qdict, BLOCK_OPT_COMPAT_LEVEL);
    if (val && !strcmp(val, "0.10")) {
        qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v2");
    } else if (val && !strcmp(val, "1.1")) {
        qdict_put_str(qdict, BLOCK_OPT_COMPAT_LEVEL, "v3");
    }

    /* Change legacy command line options into QMP ones */
    static const QDictRenames opt_renames[] = {
        { BLOCK_OPT_BACKING_FILE,       "backing-file" },
        { BLOCK_OPT_BACKING_FMT,        "backing-fmt" },
        { BLOCK_OPT_CLUSTER_SIZE,       "cluster-size" },
        { BLOCK_OPT_LAZY_REFCOUNTS,     "lazy-refcounts" },
        { BLOCK_OPT_EXTL2,              "extended-l2" },
        { BLOCK_OPT_REFCOUNT_BITS,      "refcount-bits" },
        { BLOCK_OPT_ENCRYPT,            BLOCK_OPT_ENCRYPT_FORMAT },
        { BLOCK_OPT_COMPAT_LEVEL,       "version" },
        { BLOCK_OPT_DATA_FILE_RAW,      "data-file-raw" },
        { BLOCK_OPT_COMPRESSION_TYPE,   "compression-type" },
        { NULL, NULL },
    };

    if (!qdict_rename_keys(qdict, opt_renames, errp)) {
        ret = -EINVAL;
        goto finish;
    }

    /* Create and open the file (protocol layer) */
    ret = bdrv_create_file(filename, opts, errp);
    if (ret < 0) {
        goto finish;
    }

    bs = bdrv_open(filename, NULL, NULL,
                   BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL, errp);
    if (bs == NULL) {
        ret = -EIO;
        goto finish;
    }

    /* Create and open an external data file (protocol layer) */
    val = qdict_get_try_str(qdict, BLOCK_OPT_DATA_FILE);
    if (val) {
        ret = bdrv_create_file(val, opts, errp);
        if (ret < 0) {
            goto finish;
        }

        data_bs = bdrv_open(val, NULL, NULL,
                            BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                            errp);
        if (data_bs == NULL) {
            ret = -EIO;
            goto finish;
        }

        /* The QAPI options refer to the node by name, not by filename */
        qdict_del(qdict, BLOCK_OPT_DATA_FILE);
        qdict_put_str(qdict, "data-file", data_bs->node_name);
    }

    /* Set 'driver' and 'node' options */
    qdict_put_str(qdict, "driver", "qcow2");
    qdict_put_str(qdict, "file", bs->node_name);

    /* Now get the QAPI type BlockdevCreateOptions */
    v = qobject_input_visitor_new_flat_confused(qdict, errp);
    if (!v) {
        ret = -EINVAL;
        goto finish;
    }

    visit_type_BlockdevCreateOptions(v, NULL, &create_options, errp);
    visit_free(v);
    if (!create_options) {
        ret = -EINVAL;
        goto finish;
    }

    /* Silently round up size */
    create_options->u.qcow2.size = ROUND_UP(create_options->u.qcow2.size,
                                            BDRV_SECTOR_SIZE);

    /* Create the qcow2 image (format layer) */
    ret = qcow2_co_create(create_options, errp);
    if (ret < 0) {
        goto finish;
    }

    ret = 0;
finish:
    /* bdrv_unref()/qobject_unref() tolerate NULL, so all exit paths share
     * this single cleanup sequence */
    qobject_unref(qdict);
    bdrv_unref(bs);
    bdrv_unref(data_bs);
    qapi_free_BlockdevCreateOptions(create_options);
    return ret;
}
3848
Denis V. Lunev2928abc2016-05-11 10:00:14 +03003849
Eric Blakef06f6b62017-10-11 22:47:00 -05003850static bool is_zero(BlockDriverState *bs, int64_t offset, int64_t bytes)
Denis V. Lunev2928abc2016-05-11 10:00:14 +03003851{
Eric Blake31826642017-10-11 22:47:08 -05003852 int64_t nr;
3853 int res;
Eric Blakef06f6b62017-10-11 22:47:00 -05003854
3855 /* Clamp to image length, before checking status of underlying sectors */
Eric Blake8cbf74b2017-10-11 22:47:19 -05003856 if (offset + bytes > bs->total_sectors * BDRV_SECTOR_SIZE) {
3857 bytes = bs->total_sectors * BDRV_SECTOR_SIZE - offset;
Eric Blakefbaa6bb2017-05-06 19:05:50 -05003858 }
3859
Eric Blakef06f6b62017-10-11 22:47:00 -05003860 if (!bytes) {
Eric Blakeebb718a2016-05-25 21:48:49 -06003861 return true;
3862 }
Eric Blake8cbf74b2017-10-11 22:47:19 -05003863 res = bdrv_block_status_above(bs, NULL, offset, bytes, &nr, NULL, NULL);
Eric Blake31826642017-10-11 22:47:08 -05003864 return res >= 0 && (res & BDRV_BLOCK_ZERO) && nr == bytes;
Denis V. Lunev2928abc2016-05-11 10:00:14 +03003865}
3866
/*
 * Implementation of .bdrv_co_pwrite_zeroes for qcow2: write zeroes by
 * marking subclusters as zero in the metadata rather than writing data.
 *
 * If the request is not aligned to subcluster boundaries, the whole
 * containing subcluster is zeroed instead -- but only if the unaligned
 * head/tail remainders already read as zeroes and the subcluster is not
 * backed by normal/compressed data; otherwise -ENOTSUP is returned and the
 * generic block layer falls back to an explicit write.
 *
 * Returns 0 on success, -ENOTSUP to request fallback, or another negative
 * errno on failure.
 */
static coroutine_fn int qcow2_co_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    int ret;
    BDRVQcow2State *s = bs->opaque;

    /* Unaligned head/tail remainders relative to subcluster boundaries */
    uint32_t head = offset_into_subcluster(s, offset);
    uint32_t tail = ROUND_UP(offset + bytes, s->subcluster_size) -
        (offset + bytes);

    trace_qcow2_pwrite_zeroes_start_req(qemu_coroutine_self(), offset, bytes);
    /* A request ending exactly at EOF has no meaningful tail remainder */
    if (offset + bytes == bs->total_sectors * BDRV_SECTOR_SIZE) {
        tail = 0;
    }

    if (head || tail) {
        uint64_t off;
        unsigned int nr;
        QCow2SubclusterType type;

        /* Unaligned requests must fit within a single subcluster */
        assert(head + bytes + tail <= s->subcluster_size);

        /* check whether remainder of cluster already reads as zero */
        if (!(is_zero(bs, offset - head, head) &&
              is_zero(bs, offset + bytes, tail))) {
            return -ENOTSUP;
        }

        qemu_co_mutex_lock(&s->lock);
        /* We can have new write after previous check, so re-validate the
         * subcluster state under the lock; widen the request to cover the
         * full subcluster */
        offset -= head;
        bytes = s->subcluster_size;
        nr = s->subcluster_size;
        ret = qcow2_get_host_offset(bs, offset, &nr, &off, &type);
        if (ret < 0 ||
            (type != QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN &&
             type != QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC &&
             type != QCOW2_SUBCLUSTER_ZERO_PLAIN &&
             type != QCOW2_SUBCLUSTER_ZERO_ALLOC)) {
            /* Subcluster carries real data -- let the caller fall back */
            qemu_co_mutex_unlock(&s->lock);
            return ret < 0 ? ret : -ENOTSUP;
        }
    } else {
        qemu_co_mutex_lock(&s->lock);
    }

    trace_qcow2_pwrite_zeroes(qemu_coroutine_self(), offset, bytes);

    /* Whatever is left can use real zero subclusters */
    ret = qcow2_subcluster_zeroize(bs, offset, bytes, flags);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}
3921
Eric Blake82e8a782016-07-15 17:23:03 -06003922static coroutine_fn int qcow2_co_pdiscard(BlockDriverState *bs,
Manos Pitsidianakisf5a5ca72017-06-09 13:18:08 +03003923 int64_t offset, int bytes)
Kevin Wolf5ea929e2011-01-26 16:56:48 +01003924{
Paolo Bonzini6db39ae2011-10-20 13:16:25 +02003925 int ret;
Kevin Wolfff991292015-09-07 17:12:56 +02003926 BDRVQcow2State *s = bs->opaque;
Paolo Bonzini6db39ae2011-10-20 13:16:25 +02003927
Alberto Garcia80f5c012020-03-31 13:43:45 +02003928 /* If the image does not support QCOW_OFLAG_ZERO then discarding
3929 * clusters could expose stale data from the backing file. */
3930 if (s->qcow_version < 3 && bs->backing) {
3931 return -ENOTSUP;
3932 }
3933
Manos Pitsidianakisf5a5ca72017-06-09 13:18:08 +03003934 if (!QEMU_IS_ALIGNED(offset | bytes, s->cluster_size)) {
3935 assert(bytes < s->cluster_size);
Eric Blake048c5fd2017-04-06 20:37:09 -05003936 /* Ignore partial clusters, except for the special case of the
3937 * complete partial cluster at the end of an unaligned file */
3938 if (!QEMU_IS_ALIGNED(offset, s->cluster_size) ||
Manos Pitsidianakisf5a5ca72017-06-09 13:18:08 +03003939 offset + bytes != bs->total_sectors * BDRV_SECTOR_SIZE) {
Eric Blake048c5fd2017-04-06 20:37:09 -05003940 return -ENOTSUP;
3941 }
Eric Blake49228d12016-11-17 14:13:57 -06003942 }
3943
Paolo Bonzini6db39ae2011-10-20 13:16:25 +02003944 qemu_co_mutex_lock(&s->lock);
Manos Pitsidianakisf5a5ca72017-06-09 13:18:08 +03003945 ret = qcow2_cluster_discard(bs, offset, bytes, QCOW2_DISCARD_REQUEST,
Eric Blaked2cb36a2017-05-06 19:05:52 -05003946 false);
Paolo Bonzini6db39ae2011-10-20 13:16:25 +02003947 qemu_co_mutex_unlock(&s->lock);
3948 return ret;
Kevin Wolf5ea929e2011-01-26 16:56:48 +01003949}
3950
/*
 * Implementation of .bdrv_co_copy_range_from for qcow2 (the source side of
 * a copy offloading request).
 *
 * Walks the source range, maps each contiguous extent via
 * qcow2_get_host_offset() and forwards the copy to the appropriate child
 * node: the data file for normal clusters, the backing file for
 * unallocated clusters backed by it, or a zero-write for unallocated/zero
 * extents. Compressed clusters cannot be offloaded (-ENOTSUP).
 *
 * The qcow2 state lock is held while consulting metadata and dropped
 * around the nested copy_range call (which may itself block).
 *
 * Returns 0 on success, negative errno on failure.
 */
static int coroutine_fn
qcow2_co_copy_range_from(BlockDriverState *bs,
                         BdrvChild *src, uint64_t src_offset,
                         BdrvChild *dst, uint64_t dst_offset,
                         uint64_t bytes, BdrvRequestFlags read_flags,
                         BdrvRequestFlags write_flags)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;
    unsigned int cur_bytes; /* number of bytes in current iteration */
    BdrvChild *child = NULL;
    BdrvRequestFlags cur_write_flags;

    /* Encrypted images cannot offload copies (data must pass through the
     * crypto layer) -- callers must have filtered this out already */
    assert(!bs->encrypted);
    qemu_co_mutex_lock(&s->lock);

    while (bytes != 0) {
        uint64_t copy_offset = 0;
        QCow2SubclusterType type;
        /* prepare next request */
        cur_bytes = MIN(bytes, INT_MAX);
        cur_write_flags = write_flags;

        ret = qcow2_get_host_offset(bs, src_offset, &cur_bytes,
                                    &copy_offset, &type);
        if (ret < 0) {
            goto out;
        }

        switch (type) {
        case QCOW2_SUBCLUSTER_UNALLOCATED_PLAIN:
        case QCOW2_SUBCLUSTER_UNALLOCATED_ALLOC:
            if (bs->backing && bs->backing->bs) {
                int64_t backing_length = bdrv_getlength(bs->backing->bs);
                if (src_offset >= backing_length) {
                    /* Beyond the backing file's end: reads as zeroes */
                    cur_write_flags |= BDRV_REQ_ZERO_WRITE;
                } else {
                    /* Read the data from the backing file instead */
                    child = bs->backing;
                    cur_bytes = MIN(cur_bytes, backing_length - src_offset);
                    copy_offset = src_offset;
                }
            } else {
                cur_write_flags |= BDRV_REQ_ZERO_WRITE;
            }
            break;

        case QCOW2_SUBCLUSTER_ZERO_PLAIN:
        case QCOW2_SUBCLUSTER_ZERO_ALLOC:
            cur_write_flags |= BDRV_REQ_ZERO_WRITE;
            break;

        case QCOW2_SUBCLUSTER_COMPRESSED:
            /* Compressed data would have to be decompressed first; punt */
            ret = -ENOTSUP;
            goto out;

        case QCOW2_SUBCLUSTER_NORMAL:
            child = s->data_file;
            break;

        default:
            abort();
        }
        /* Drop the lock around the (potentially blocking) nested copy */
        qemu_co_mutex_unlock(&s->lock);
        ret = bdrv_co_copy_range_from(child,
                                      copy_offset,
                                      dst, dst_offset,
                                      cur_bytes, read_flags, cur_write_flags);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto out;
        }

        bytes -= cur_bytes;
        src_offset += cur_bytes;
        dst_offset += cur_bytes;
    }
    ret = 0;

out:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
4033
/*
 * Implementation of .bdrv_co_copy_range_to for qcow2 (the destination side
 * of a copy offloading request).
 *
 * For each chunk: allocate host clusters for the destination range,
 * perform the metadata overlap check, forward the copy into the data file
 * with the lock dropped, and then commit the L2 metadata updates.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int coroutine_fn
qcow2_co_copy_range_to(BlockDriverState *bs,
                       BdrvChild *src, uint64_t src_offset,
                       BdrvChild *dst, uint64_t dst_offset,
                       uint64_t bytes, BdrvRequestFlags read_flags,
                       BdrvRequestFlags write_flags)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;
    unsigned int cur_bytes; /* number of bytes in current iteration */
    uint64_t host_offset;
    QCowL2Meta *l2meta = NULL;

    /* Encrypted images cannot offload copies -- callers filter this out */
    assert(!bs->encrypted);

    qemu_co_mutex_lock(&s->lock);

    while (bytes != 0) {

        l2meta = NULL;

        cur_bytes = MIN(bytes, INT_MAX);

        /* TODO:
         * If src->bs == dst->bs, we could simply copy by incrementing
         * the refcnt, without copying user data.
         * Or if src->bs == dst->bs->backing->bs, we could copy by discarding. */
        ret = qcow2_alloc_host_offset(bs, dst_offset, &cur_bytes,
                                      &host_offset, &l2meta);
        if (ret < 0) {
            goto fail;
        }

        /* Make sure the allocated area does not overlap qcow2 metadata */
        ret = qcow2_pre_write_overlap_check(bs, 0, host_offset, cur_bytes,
                                            true);
        if (ret < 0) {
            goto fail;
        }

        /* Drop the lock around the (potentially blocking) nested copy */
        qemu_co_mutex_unlock(&s->lock);
        ret = bdrv_co_copy_range_to(src, src_offset, s->data_file, host_offset,
                                    cur_bytes, read_flags, write_flags);
        qemu_co_mutex_lock(&s->lock);
        if (ret < 0) {
            goto fail;
        }

        /* Commit the L2 table updates for the newly allocated clusters */
        ret = qcow2_handle_l2meta(bs, &l2meta, true);
        if (ret) {
            goto fail;
        }

        bytes -= cur_bytes;
        src_offset += cur_bytes;
        dst_offset += cur_bytes;
    }
    ret = 0;

fail:
    /* On error this rolls back any in-flight allocation (link_l2 = false);
     * on success l2meta is already NULL and this is a no-op */
    qcow2_handle_l2meta(bs, &l2meta, false);

    qemu_co_mutex_unlock(&s->lock);

    trace_qcow2_writev_done_req(qemu_coroutine_self(), ret);

    return ret;
}
4101
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004102static int coroutine_fn qcow2_co_truncate(BlockDriverState *bs, int64_t offset,
Max Reitzc80d8b02019-09-18 11:51:40 +02004103 bool exact, PreallocMode prealloc,
Kevin Wolf92b92792020-04-24 14:54:39 +02004104 BdrvRequestFlags flags, Error **errp)
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004105{
Kevin Wolfff991292015-09-07 17:12:56 +02004106 BDRVQcow2State *s = bs->opaque;
Max Reitz95b98f32017-06-13 22:21:02 +02004107 uint64_t old_length;
Kevin Wolf2cf7cfa2013-05-14 16:14:33 +02004108 int64_t new_l1_size;
4109 int ret;
Leonid Bloch45b49492018-09-26 19:04:45 +03004110 QDict *options;
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004111
Max Reitz772d1f92017-06-13 22:21:05 +02004112 if (prealloc != PREALLOC_MODE_OFF && prealloc != PREALLOC_MODE_METADATA &&
4113 prealloc != PREALLOC_MODE_FALLOC && prealloc != PREALLOC_MODE_FULL)
4114 {
Max Reitz8243ccb2017-06-13 22:20:52 +02004115 error_setg(errp, "Unsupported preallocation mode '%s'",
Markus Armbruster977c7362017-08-24 10:46:08 +02004116 PreallocMode_str(prealloc));
Max Reitz8243ccb2017-06-13 22:20:52 +02004117 return -ENOTSUP;
4118 }
4119
Alberto Garcia3afea402020-01-18 20:09:30 +01004120 if (!QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE)) {
4121 error_setg(errp, "The new size must be a multiple of %u",
4122 (unsigned) BDRV_SECTOR_SIZE);
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004123 return -EINVAL;
4124 }
4125
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004126 qemu_co_mutex_lock(&s->lock);
4127
Eric Blake7fa140a2020-04-28 14:26:47 -05004128 /*
4129 * Even though we store snapshot size for all images, it was not
4130 * required until v3, so it is not safe to proceed for v2.
4131 */
4132 if (s->nb_snapshots && s->qcow_version < 3) {
4133 error_setg(errp, "Can't resize a v2 image which has snapshots");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004134 ret = -ENOTSUP;
4135 goto fail;
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004136 }
4137
Eric Blakeee1244a2020-04-28 14:26:48 -05004138 /* See qcow2-bitmap.c for which bitmap scenarios prevent a resize. */
John Snowd19c6b32019-03-11 21:51:46 +03004139 if (qcow2_truncate_bitmaps_check(bs, errp)) {
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004140 ret = -ENOTSUP;
4141 goto fail;
Vladimir Sementsov-Ogievskiy88ddffa2017-06-28 15:05:08 +03004142 }
4143
Leonid Blochbd016b92018-09-26 19:04:47 +03004144 old_length = bs->total_sectors * BDRV_SECTOR_SIZE;
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004145 new_l1_size = size_to_l1(s, offset);
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004146
4147 if (offset < old_length) {
Pavel Butsykin163bc392017-09-29 15:16:13 +03004148 int64_t last_cluster, old_file_size;
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004149 if (prealloc != PREALLOC_MODE_OFF) {
4150 error_setg(errp,
4151 "Preallocation can't be used for shrinking an image");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004152 ret = -EINVAL;
4153 goto fail;
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004154 }
4155
4156 ret = qcow2_cluster_discard(bs, ROUND_UP(offset, s->cluster_size),
4157 old_length - ROUND_UP(offset,
4158 s->cluster_size),
4159 QCOW2_DISCARD_ALWAYS, true);
4160 if (ret < 0) {
4161 error_setg_errno(errp, -ret, "Failed to discard cropped clusters");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004162 goto fail;
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004163 }
4164
4165 ret = qcow2_shrink_l1_table(bs, new_l1_size);
4166 if (ret < 0) {
4167 error_setg_errno(errp, -ret,
4168 "Failed to reduce the number of L2 tables");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004169 goto fail;
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004170 }
4171
4172 ret = qcow2_shrink_reftable(bs);
4173 if (ret < 0) {
4174 error_setg_errno(errp, -ret,
4175 "Failed to discard unused refblocks");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004176 goto fail;
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004177 }
Pavel Butsykin163bc392017-09-29 15:16:13 +03004178
4179 old_file_size = bdrv_getlength(bs->file->bs);
4180 if (old_file_size < 0) {
4181 error_setg_errno(errp, -old_file_size,
4182 "Failed to inquire current file length");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004183 ret = old_file_size;
4184 goto fail;
Pavel Butsykin163bc392017-09-29 15:16:13 +03004185 }
4186 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4187 if (last_cluster < 0) {
4188 error_setg_errno(errp, -last_cluster,
4189 "Failed to find the last cluster");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004190 ret = last_cluster;
4191 goto fail;
Pavel Butsykin163bc392017-09-29 15:16:13 +03004192 }
4193 if ((last_cluster + 1) * s->cluster_size < old_file_size) {
Max Reitz233521b2017-10-09 17:54:31 +02004194 Error *local_err = NULL;
4195
Max Reitze61a28a2019-09-18 11:51:42 +02004196 /*
4197 * Do not pass @exact here: It will not help the user if
4198 * we get an error here just because they wanted to shrink
4199 * their qcow2 image (on a block device) with qemu-img.
4200 * (And on the qcow2 layer, the @exact requirement is
4201 * always fulfilled, so there is no need to pass it on.)
4202 */
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004203 bdrv_co_truncate(bs->file, (last_cluster + 1) * s->cluster_size,
Kevin Wolf7b8e4852020-04-24 14:54:40 +02004204 false, PREALLOC_MODE_OFF, 0, &local_err);
Max Reitz233521b2017-10-09 17:54:31 +02004205 if (local_err) {
4206 warn_reportf_err(local_err,
4207 "Failed to truncate the tail of the image: ");
Pavel Butsykin163bc392017-09-29 15:16:13 +03004208 }
4209 }
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004210 } else {
4211 ret = qcow2_grow_l1_table(bs, new_l1_size, true);
4212 if (ret < 0) {
4213 error_setg_errno(errp, -ret, "Failed to grow the L1 table");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004214 goto fail;
Pavel Butsykin46b732c2017-09-18 15:42:29 +03004215 }
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004216 }
4217
Max Reitz95b98f32017-06-13 22:21:02 +02004218 switch (prealloc) {
4219 case PREALLOC_MODE_OFF:
Kevin Wolf718c0fc2019-04-15 16:34:30 +02004220 if (has_data_file(bs)) {
Max Reitze61a28a2019-09-18 11:51:42 +02004221 /*
4222 * If the caller wants an exact resize, the external data
4223 * file should be resized to the exact target size, too,
4224 * so we pass @exact here.
4225 */
Kevin Wolf7b8e4852020-04-24 14:54:40 +02004226 ret = bdrv_co_truncate(s->data_file, offset, exact, prealloc, 0,
4227 errp);
Kevin Wolf718c0fc2019-04-15 16:34:30 +02004228 if (ret < 0) {
4229 goto fail;
4230 }
4231 }
Max Reitz95b98f32017-06-13 22:21:02 +02004232 break;
4233
4234 case PREALLOC_MODE_METADATA:
Kevin Wolf718c0fc2019-04-15 16:34:30 +02004235 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
Max Reitz95b98f32017-06-13 22:21:02 +02004236 if (ret < 0) {
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004237 goto fail;
Max Reitz95b98f32017-06-13 22:21:02 +02004238 }
4239 break;
4240
Max Reitz772d1f92017-06-13 22:21:05 +02004241 case PREALLOC_MODE_FALLOC:
4242 case PREALLOC_MODE_FULL:
4243 {
4244 int64_t allocation_start, host_offset, guest_offset;
4245 int64_t clusters_allocated;
Max Reitz4b96fa32020-05-05 16:18:01 +02004246 int64_t old_file_size, last_cluster, new_file_size;
Max Reitz772d1f92017-06-13 22:21:05 +02004247 uint64_t nb_new_data_clusters, nb_new_l2_tables;
Alberto Garcia40dee942020-07-10 18:13:12 +02004248 bool subclusters_need_allocation = false;
Max Reitz772d1f92017-06-13 22:21:05 +02004249
Kevin Wolf966b0002019-01-15 20:39:06 +01004250 /* With a data file, preallocation means just allocating the metadata
4251 * and forwarding the truncate request to the data file */
4252 if (has_data_file(bs)) {
Kevin Wolf718c0fc2019-04-15 16:34:30 +02004253 ret = preallocate_co(bs, old_length, offset, prealloc, errp);
Kevin Wolf966b0002019-01-15 20:39:06 +01004254 if (ret < 0) {
Kevin Wolf966b0002019-01-15 20:39:06 +01004255 goto fail;
4256 }
4257 break;
4258 }
4259
Max Reitz772d1f92017-06-13 22:21:05 +02004260 old_file_size = bdrv_getlength(bs->file->bs);
4261 if (old_file_size < 0) {
4262 error_setg_errno(errp, -old_file_size,
4263 "Failed to inquire current file length");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004264 ret = old_file_size;
4265 goto fail;
Max Reitz772d1f92017-06-13 22:21:05 +02004266 }
Max Reitz4b96fa32020-05-05 16:18:01 +02004267
4268 last_cluster = qcow2_get_last_cluster(bs, old_file_size);
4269 if (last_cluster >= 0) {
4270 old_file_size = (last_cluster + 1) * s->cluster_size;
4271 } else {
4272 old_file_size = ROUND_UP(old_file_size, s->cluster_size);
4273 }
Max Reitz772d1f92017-06-13 22:21:05 +02004274
Alberto Garciaa5675f32020-06-17 16:00:36 +02004275 nb_new_data_clusters = (ROUND_UP(offset, s->cluster_size) -
4276 start_of_cluster(s, old_length)) >> s->cluster_bits;
Max Reitz772d1f92017-06-13 22:21:05 +02004277
4278 /* This is an overestimation; we will not actually allocate space for
4279 * these in the file but just make sure the new refcount structures are
4280 * able to cover them so we will not have to allocate new refblocks
4281 * while entering the data blocks in the potentially new L2 tables.
4282 * (We do not actually care where the L2 tables are placed. Maybe they
4283 * are already allocated or they can be placed somewhere before
4284 * @old_file_size. It does not matter because they will be fully
4285 * allocated automatically, so they do not need to be covered by the
4286 * preallocation. All that matters is that we will not have to allocate
4287 * new refcount structures for them.) */
4288 nb_new_l2_tables = DIV_ROUND_UP(nb_new_data_clusters,
Alberto Garciac8fd8552020-07-10 18:12:54 +02004289 s->cluster_size / l2_entry_size(s));
Max Reitz772d1f92017-06-13 22:21:05 +02004290 /* The cluster range may not be aligned to L2 boundaries, so add one L2
4291 * table for a potential head/tail */
4292 nb_new_l2_tables++;
4293
4294 allocation_start = qcow2_refcount_area(bs, old_file_size,
4295 nb_new_data_clusters +
4296 nb_new_l2_tables,
4297 true, 0, 0);
4298 if (allocation_start < 0) {
4299 error_setg_errno(errp, -allocation_start,
4300 "Failed to resize refcount structures");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004301 ret = allocation_start;
4302 goto fail;
Max Reitz772d1f92017-06-13 22:21:05 +02004303 }
4304
4305 clusters_allocated = qcow2_alloc_clusters_at(bs, allocation_start,
4306 nb_new_data_clusters);
4307 if (clusters_allocated < 0) {
4308 error_setg_errno(errp, -clusters_allocated,
4309 "Failed to allocate data clusters");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004310 ret = clusters_allocated;
4311 goto fail;
Max Reitz772d1f92017-06-13 22:21:05 +02004312 }
4313
4314 assert(clusters_allocated == nb_new_data_clusters);
4315
4316 /* Allocate the data area */
4317 new_file_size = allocation_start +
4318 nb_new_data_clusters * s->cluster_size;
Kevin Wolfeb8a0cf2020-04-24 16:27:01 +02004319 /*
4320 * Image file grows, so @exact does not matter.
4321 *
4322 * If we need to zero out the new area, try first whether the protocol
4323 * driver can already take care of this.
4324 */
4325 if (flags & BDRV_REQ_ZERO_WRITE) {
4326 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc,
4327 BDRV_REQ_ZERO_WRITE, NULL);
4328 if (ret >= 0) {
4329 flags &= ~BDRV_REQ_ZERO_WRITE;
Alberto Garcia40dee942020-07-10 18:13:12 +02004330 /* Ensure that we read zeroes and not backing file data */
4331 subclusters_need_allocation = true;
Kevin Wolfeb8a0cf2020-04-24 16:27:01 +02004332 }
4333 } else {
4334 ret = -1;
4335 }
4336 if (ret < 0) {
4337 ret = bdrv_co_truncate(bs->file, new_file_size, false, prealloc, 0,
4338 errp);
4339 }
Max Reitz772d1f92017-06-13 22:21:05 +02004340 if (ret < 0) {
4341 error_prepend(errp, "Failed to resize underlying file: ");
4342 qcow2_free_clusters(bs, allocation_start,
4343 nb_new_data_clusters * s->cluster_size,
4344 QCOW2_DISCARD_OTHER);
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004345 goto fail;
Max Reitz772d1f92017-06-13 22:21:05 +02004346 }
4347
4348 /* Create the necessary L2 entries */
4349 host_offset = allocation_start;
4350 guest_offset = old_length;
4351 while (nb_new_data_clusters) {
Alberto Garcia13bec222018-02-05 16:33:31 +02004352 int64_t nb_clusters = MIN(
4353 nb_new_data_clusters,
4354 s->l2_slice_size - offset_to_l2_slice_index(s, guest_offset));
Alberto Garciaa5675f32020-06-17 16:00:36 +02004355 unsigned cow_start_length = offset_into_cluster(s, guest_offset);
4356 QCowL2Meta allocation;
4357 guest_offset = start_of_cluster(s, guest_offset);
4358 allocation = (QCowL2Meta) {
Max Reitz772d1f92017-06-13 22:21:05 +02004359 .offset = guest_offset,
4360 .alloc_offset = host_offset,
4361 .nb_clusters = nb_clusters,
Alberto Garciaa5675f32020-06-17 16:00:36 +02004362 .cow_start = {
4363 .offset = 0,
4364 .nb_bytes = cow_start_length,
4365 },
4366 .cow_end = {
4367 .offset = nb_clusters << s->cluster_bits,
4368 .nb_bytes = 0,
4369 },
Alberto Garcia40dee942020-07-10 18:13:12 +02004370 .prealloc = !subclusters_need_allocation,
Max Reitz772d1f92017-06-13 22:21:05 +02004371 };
4372 qemu_co_queue_init(&allocation.dependent_requests);
4373
4374 ret = qcow2_alloc_cluster_link_l2(bs, &allocation);
4375 if (ret < 0) {
4376 error_setg_errno(errp, -ret, "Failed to update L2 tables");
4377 qcow2_free_clusters(bs, host_offset,
4378 nb_new_data_clusters * s->cluster_size,
4379 QCOW2_DISCARD_OTHER);
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004380 goto fail;
Max Reitz772d1f92017-06-13 22:21:05 +02004381 }
4382
4383 guest_offset += nb_clusters * s->cluster_size;
4384 host_offset += nb_clusters * s->cluster_size;
4385 nb_new_data_clusters -= nb_clusters;
4386 }
4387 break;
4388 }
4389
Max Reitz95b98f32017-06-13 22:21:02 +02004390 default:
4391 g_assert_not_reached();
4392 }
4393
Kevin Wolff01643f2020-04-24 14:54:42 +02004394 if ((flags & BDRV_REQ_ZERO_WRITE) && offset > old_length) {
Alberto Garciaa6841a22020-07-10 18:13:10 +02004395 uint64_t zero_start = QEMU_ALIGN_UP(old_length, s->subcluster_size);
Kevin Wolff01643f2020-04-24 14:54:42 +02004396
4397 /*
Alberto Garciaa6841a22020-07-10 18:13:10 +02004398 * Use zero clusters as much as we can. qcow2_subcluster_zeroize()
4399 * requires a subcluster-aligned start. The end may be unaligned if
4400 * it is at the end of the image (which it is here).
Kevin Wolff01643f2020-04-24 14:54:42 +02004401 */
Alberto Garciae4d70192020-05-04 17:52:17 +02004402 if (offset > zero_start) {
Alberto Garciaa6841a22020-07-10 18:13:10 +02004403 ret = qcow2_subcluster_zeroize(bs, zero_start, offset - zero_start,
4404 0);
Alberto Garciae4d70192020-05-04 17:52:17 +02004405 if (ret < 0) {
4406 error_setg_errno(errp, -ret, "Failed to zero out new clusters");
4407 goto fail;
4408 }
Kevin Wolff01643f2020-04-24 14:54:42 +02004409 }
4410
4411 /* Write explicit zeros for the unaligned head */
4412 if (zero_start > old_length) {
Alberto Garciae4d70192020-05-04 17:52:17 +02004413 uint64_t len = MIN(zero_start, offset) - old_length;
Kevin Wolff01643f2020-04-24 14:54:42 +02004414 uint8_t *buf = qemu_blockalign0(bs, len);
4415 QEMUIOVector qiov;
4416 qemu_iovec_init_buf(&qiov, buf, len);
4417
4418 qemu_co_mutex_unlock(&s->lock);
4419 ret = qcow2_co_pwritev_part(bs, old_length, len, &qiov, 0, 0);
4420 qemu_co_mutex_lock(&s->lock);
4421
4422 qemu_vfree(buf);
4423 if (ret < 0) {
4424 error_setg_errno(errp, -ret, "Failed to zero out the new area");
4425 goto fail;
4426 }
4427 }
4428 }
4429
Max Reitz95b98f32017-06-13 22:21:02 +02004430 if (prealloc != PREALLOC_MODE_OFF) {
4431 /* Flush metadata before actually changing the image size */
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004432 ret = qcow2_write_caches(bs);
Max Reitz95b98f32017-06-13 22:21:02 +02004433 if (ret < 0) {
4434 error_setg_errno(errp, -ret,
4435 "Failed to flush the preallocated area to disk");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004436 goto fail;
Max Reitz95b98f32017-06-13 22:21:02 +02004437 }
4438 }
4439
Leonid Bloch45b49492018-09-26 19:04:45 +03004440 bs->total_sectors = offset / BDRV_SECTOR_SIZE;
4441
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004442 /* write updated header.size */
4443 offset = cpu_to_be64(offset);
Kevin Wolfd9ca2ea2016-06-20 20:09:15 +02004444 ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, size),
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02004445 &offset, sizeof(offset));
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004446 if (ret < 0) {
Max Reitzf59adb32017-03-28 22:51:29 +02004447 error_setg_errno(errp, -ret, "Failed to update the image size");
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004448 goto fail;
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004449 }
4450
4451 s->l1_vm_state_index = new_l1_size;
Leonid Bloch45b49492018-09-26 19:04:45 +03004452
4453 /* Update cache sizes */
4454 options = qdict_clone_shallow(bs->options);
4455 ret = qcow2_update_options(bs, options, s->flags, errp);
4456 qobject_unref(options);
4457 if (ret < 0) {
4458 goto fail;
4459 }
Kevin Wolf061ca8a2018-06-21 17:54:35 +02004460 ret = 0;
4461fail:
4462 qemu_co_mutex_unlock(&s->lock);
4463 return ret;
Stefan Hajnoczi419b19d2010-04-28 11:36:11 +01004464}
4465
/*
 * Compress and write one chunk (at most one cluster) of guest data.
 *
 * @offset/@bytes describe the guest range; @qiov/@qiov_offset locate the
 * source data. The caller (qcow2_co_pwritev_compressed_part) guarantees that
 * @bytes is either exactly one cluster, or shorter only when the chunk ends
 * exactly at the end of the image (the final partial cluster).
 *
 * If the data does not compress (qcow2_co_compress reports -ENOMEM because
 * the output would not fit in cluster_size - 1 bytes), the chunk is written
 * as a normal uncompressed cluster instead.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static coroutine_fn int
qcow2_co_pwritev_compressed_task(BlockDriverState *bs,
                                 uint64_t offset, uint64_t bytes,
                                 QEMUIOVector *qiov, size_t qiov_offset)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;
    ssize_t out_len;
    uint8_t *buf, *out_buf;
    uint64_t cluster_offset;

    /* Full cluster, or a short tail that reaches the end of the image */
    assert(bytes == s->cluster_size || (bytes < s->cluster_size &&
           (offset + bytes == bs->total_sectors << BDRV_SECTOR_BITS)));

    buf = qemu_blockalign(bs, s->cluster_size);
    if (bytes < s->cluster_size) {
        /* Zero-pad last write if image size is not cluster aligned */
        memset(buf + bytes, 0, s->cluster_size - bytes);
    }
    qemu_iovec_to_buf(qiov, qiov_offset, buf, bytes);

    out_buf = g_malloc(s->cluster_size);

    /*
     * Limit the output to cluster_size - 1: a compressed cluster must be
     * strictly smaller than an uncompressed one to be worth storing.
     */
    out_len = qcow2_co_compress(bs, out_buf, s->cluster_size - 1,
                                buf, s->cluster_size);
    if (out_len == -ENOMEM) {
        /* could not compress: write normal cluster */
        ret = qcow2_co_pwritev_part(bs, offset, bytes, qiov, qiov_offset, 0);
        if (ret < 0) {
            goto fail;
        }
        goto success;
    } else if (out_len < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* s->lock protects the cluster allocation and the overlap check */
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_alloc_compressed_cluster_offset(bs, offset, out_len,
                                                &cluster_offset);
    if (ret < 0) {
        qemu_co_mutex_unlock(&s->lock);
        goto fail;
    }

    ret = qcow2_pre_write_overlap_check(bs, 0, cluster_offset, out_len, true);
    qemu_co_mutex_unlock(&s->lock);
    if (ret < 0) {
        goto fail;
    }

    /* The actual data write happens outside the lock */
    BLKDBG_EVENT(s->data_file, BLKDBG_WRITE_COMPRESSED);
    ret = bdrv_co_pwrite(s->data_file, cluster_offset, out_len, out_buf, 0);
    if (ret < 0) {
        goto fail;
    }
success:
    ret = 0;
fail:
    qemu_vfree(buf);
    g_free(out_buf);
    return ret;
}
4529
Andrey Shinkevich0d483dc2019-12-02 15:15:05 +03004530static coroutine_fn int qcow2_co_pwritev_compressed_task_entry(AioTask *task)
4531{
4532 Qcow2AioTask *t = container_of(task, Qcow2AioTask, task);
4533
Alberto Garcia10dabdc52020-07-10 18:13:00 +02004534 assert(!t->subcluster_type && !t->l2meta);
Andrey Shinkevich0d483dc2019-12-02 15:15:05 +03004535
4536 return qcow2_co_pwritev_compressed_task(t->bs, t->offset, t->bytes, t->qiov,
4537 t->qiov_offset);
4538}
4539
4540/*
4541 * XXX: put compressed sectors first, then all the cluster aligned
4542 * tables to avoid losing bytes in alignment
4543 */
4544static coroutine_fn int
4545qcow2_co_pwritev_compressed_part(BlockDriverState *bs,
4546 uint64_t offset, uint64_t bytes,
4547 QEMUIOVector *qiov, size_t qiov_offset)
4548{
4549 BDRVQcow2State *s = bs->opaque;
4550 AioTaskPool *aio = NULL;
4551 int ret = 0;
4552
4553 if (has_data_file(bs)) {
4554 return -ENOTSUP;
4555 }
4556
4557 if (bytes == 0) {
4558 /*
4559 * align end of file to a sector boundary to ease reading with
4560 * sector based I/Os
4561 */
4562 int64_t len = bdrv_getlength(bs->file->bs);
4563 if (len < 0) {
4564 return len;
4565 }
Kevin Wolf7b8e4852020-04-24 14:54:40 +02004566 return bdrv_co_truncate(bs->file, len, false, PREALLOC_MODE_OFF, 0,
4567 NULL);
Andrey Shinkevich0d483dc2019-12-02 15:15:05 +03004568 }
4569
4570 if (offset_into_cluster(s, offset)) {
4571 return -EINVAL;
4572 }
4573
Alberto Garciafb43d2d2020-04-06 16:34:01 +02004574 if (offset_into_cluster(s, bytes) &&
4575 (offset + bytes) != (bs->total_sectors << BDRV_SECTOR_BITS)) {
4576 return -EINVAL;
4577 }
4578
Andrey Shinkevich0d483dc2019-12-02 15:15:05 +03004579 while (bytes && aio_task_pool_status(aio) == 0) {
4580 uint64_t chunk_size = MIN(bytes, s->cluster_size);
4581
4582 if (!aio && chunk_size != bytes) {
4583 aio = aio_task_pool_new(QCOW2_MAX_WORKERS);
4584 }
4585
4586 ret = qcow2_add_task(bs, aio, qcow2_co_pwritev_compressed_task_entry,
4587 0, 0, offset, chunk_size, qiov, qiov_offset, NULL);
4588 if (ret < 0) {
4589 break;
4590 }
4591 qiov_offset += chunk_size;
4592 offset += chunk_size;
4593 bytes -= chunk_size;
4594 }
4595
4596 if (aio) {
4597 aio_task_pool_wait_all(aio);
4598 if (ret == 0) {
4599 ret = aio_task_pool_status(aio);
4600 }
4601 g_free(aio);
4602 }
4603
4604 return ret;
4605}
4606
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004607static int coroutine_fn
4608qcow2_co_preadv_compressed(BlockDriverState *bs,
Alberto Garcia9c4269d2020-07-10 18:12:43 +02004609 uint64_t cluster_descriptor,
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004610 uint64_t offset,
4611 uint64_t bytes,
Vladimir Sementsov-Ogievskiydf893d22019-06-04 19:15:13 +03004612 QEMUIOVector *qiov,
4613 size_t qiov_offset)
Vladimir Sementsov-Ogievskiyf4b3e2a2018-11-01 21:27:34 +03004614{
4615 BDRVQcow2State *s = bs->opaque;
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004616 int ret = 0, csize, nb_csectors;
Vladimir Sementsov-Ogievskiyf4b3e2a2018-11-01 21:27:34 +03004617 uint64_t coffset;
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004618 uint8_t *buf, *out_buf;
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004619 int offset_in_cluster = offset_into_cluster(s, offset);
Vladimir Sementsov-Ogievskiyf4b3e2a2018-11-01 21:27:34 +03004620
Alberto Garcia9c4269d2020-07-10 18:12:43 +02004621 coffset = cluster_descriptor & s->cluster_offset_mask;
4622 nb_csectors = ((cluster_descriptor >> s->csize_shift) & s->csize_mask) + 1;
Alberto Garciab6c24692019-05-10 19:22:54 +03004623 csize = nb_csectors * QCOW2_COMPRESSED_SECTOR_SIZE -
4624 (coffset & ~QCOW2_COMPRESSED_SECTOR_MASK);
Vladimir Sementsov-Ogievskiyf4b3e2a2018-11-01 21:27:34 +03004625
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004626 buf = g_try_malloc(csize);
4627 if (!buf) {
4628 return -ENOMEM;
Vladimir Sementsov-Ogievskiyf4b3e2a2018-11-01 21:27:34 +03004629 }
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004630
4631 out_buf = qemu_blockalign(bs, s->cluster_size);
4632
4633 BLKDBG_EVENT(bs->file, BLKDBG_READ_COMPRESSED);
Vladimir Sementsov-Ogievskiyb00cb152019-04-22 17:58:31 +03004634 ret = bdrv_co_pread(bs->file, coffset, csize, buf, 0);
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004635 if (ret < 0) {
4636 goto fail;
4637 }
4638
Vladimir Sementsov-Ogievskiye23c9d72018-11-01 21:27:38 +03004639 if (qcow2_co_decompress(bs, out_buf, s->cluster_size, buf, csize) < 0) {
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004640 ret = -EIO;
4641 goto fail;
4642 }
4643
Vladimir Sementsov-Ogievskiydf893d22019-06-04 19:15:13 +03004644 qemu_iovec_from_buf(qiov, qiov_offset, out_buf + offset_in_cluster, bytes);
Vladimir Sementsov-Ogievskiyc3c10f72018-11-01 21:27:37 +03004645
4646fail:
4647 qemu_vfree(out_buf);
4648 g_free(buf);
4649
4650 return ret;
Vladimir Sementsov-Ogievskiyf4b3e2a2018-11-01 21:27:34 +03004651}
4652
/*
 * Fast path for emptying a qcow2 v3 image: instead of discarding clusters
 * one by one, rebuild a minimal metadata layout at the start of the file
 * (header, one-cluster reftable, one refblock, empty L1 table) and truncate
 * everything behind it.
 *
 * The caller (qcow2_make_empty) has verified the preconditions: v3 image,
 * no snapshots, no bitmaps, no LUKS header, no external data file, and the
 * new metadata fits in a single refblock.
 *
 * While this function runs, the on-disk refcounts are deliberately invalid;
 * the image is marked dirty first so a crash leads to a repair, not to
 * corruption. If an error occurs in that window, the BDS is ejected
 * (bs->drv = NULL) because the in-memory refcount state no longer matches
 * the on-disk state and cannot be trusted.
 *
 * Returns 0 on success, a negative errno on failure.
 */
static int make_completely_empty(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    Error *local_err = NULL;
    int ret, l1_clusters;
    int64_t offset;
    uint64_t *new_reftable = NULL;
    uint64_t rt_entry, l1_size2;
    /* On-disk layout of the header fields rewritten below (big-endian) */
    struct {
        uint64_t l1_offset;
        uint64_t reftable_offset;
        uint32_t reftable_clusters;
    } QEMU_PACKED l1_ofs_rt_ofs_cls;

    /* Drop all cached metadata; it describes content we are about to wipe */
    ret = qcow2_cache_empty(bs, s->l2_table_cache);
    if (ret < 0) {
        goto fail;
    }

    ret = qcow2_cache_empty(bs, s->refcount_block_cache);
    if (ret < 0) {
        goto fail;
    }

    /* Refcounts will be broken utterly */
    ret = qcow2_mark_dirty(bs);
    if (ret < 0) {
        goto fail;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);

    l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
    l1_size2 = (uint64_t)s->l1_size * L1E_SIZE;

    /* After this call, neither the in-memory nor the on-disk refcount
     * information accurately describe the actual references */

    ret = bdrv_pwrite_zeroes(bs->file, s->l1_table_offset,
                             l1_clusters * s->cluster_size, 0);
    if (ret < 0) {
        goto fail_broken_refcounts;
    }
    memset(s->l1_table, 0, l1_size2);

    BLKDBG_EVENT(bs->file, BLKDBG_EMPTY_IMAGE_PREPARE);

    /* Overwrite enough clusters at the beginning of the sectors to place
     * the refcount table, a refcount block and the L1 table in; this may
     * overwrite parts of the existing refcount and L1 table, which is not
     * an issue because the dirty flag is set, complete data loss is in fact
     * desired and partial data loss is consequently fine as well */
    ret = bdrv_pwrite_zeroes(bs->file, s->cluster_size,
                             (2 + l1_clusters) * s->cluster_size, 0);
    /* This call (even if it failed overall) may have overwritten on-disk
     * refcount structures; in that case, the in-memory refcount information
     * will probably differ from the on-disk information which makes the BDS
     * unusable */
    if (ret < 0) {
        goto fail_broken_refcounts;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_L1_UPDATE);
    BLKDBG_EVENT(bs->file, BLKDBG_REFTABLE_UPDATE);

    /* "Create" an empty reftable (one cluster) directly after the image
     * header and an empty L1 table three clusters after the image header;
     * the cluster between those two will be used as the first refblock */
    l1_ofs_rt_ofs_cls.l1_offset = cpu_to_be64(3 * s->cluster_size);
    l1_ofs_rt_ofs_cls.reftable_offset = cpu_to_be64(s->cluster_size);
    l1_ofs_rt_ofs_cls.reftable_clusters = cpu_to_be32(1);
    ret = bdrv_pwrite_sync(bs->file, offsetof(QCowHeader, l1_table_offset),
                           &l1_ofs_rt_ofs_cls, sizeof(l1_ofs_rt_ofs_cls));
    if (ret < 0) {
        goto fail_broken_refcounts;
    }

    s->l1_table_offset = 3 * s->cluster_size;

    new_reftable = g_try_new0(uint64_t, s->cluster_size / REFTABLE_ENTRY_SIZE);
    if (!new_reftable) {
        ret = -ENOMEM;
        goto fail_broken_refcounts;
    }

    s->refcount_table_offset = s->cluster_size;
    s->refcount_table_size = s->cluster_size / REFTABLE_ENTRY_SIZE;
    s->max_refcount_table_index = 0;

    g_free(s->refcount_table);
    s->refcount_table = new_reftable;
    new_reftable = NULL;

    /* Now the in-memory refcount information again corresponds to the on-disk
     * information (reftable is empty and no refblocks (the refblock cache is
     * empty)); however, this means some clusters (e.g. the image header) are
     * referenced, but not refcounted, but the normal qcow2 code assumes that
     * the in-memory information is always correct */

    BLKDBG_EVENT(bs->file, BLKDBG_REFBLOCK_ALLOC);

    /* Enter the first refblock into the reftable */
    rt_entry = cpu_to_be64(2 * s->cluster_size);
    ret = bdrv_pwrite_sync(bs->file, s->cluster_size,
                           &rt_entry, sizeof(rt_entry));
    if (ret < 0) {
        goto fail_broken_refcounts;
    }
    s->refcount_table[0] = 2 * s->cluster_size;

    /* Allocate (i.e. refcount) the header, reftable, refblock and L1 table;
     * a result > 0 would mean the first clusters were unexpectedly in use */
    s->free_cluster_index = 0;
    assert(3 + l1_clusters <= s->refcount_block_size);
    offset = qcow2_alloc_clusters(bs, 3 * s->cluster_size + l1_size2);
    if (offset < 0) {
        ret = offset;
        goto fail_broken_refcounts;
    } else if (offset > 0) {
        error_report("First cluster in emptied image is in use");
        abort();
    }

    /* Now finally the in-memory information corresponds to the on-disk
     * structures and is correct */
    ret = qcow2_mark_clean(bs);
    if (ret < 0) {
        goto fail;
    }

    /* Drop everything behind the new minimal metadata */
    ret = bdrv_truncate(bs->file, (3 + l1_clusters) * s->cluster_size, false,
                        PREALLOC_MODE_OFF, 0, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        goto fail;
    }

    return 0;

fail_broken_refcounts:
    /* The BDS is unusable at this point. If we wanted to make it usable, we
     * would have to call qcow2_refcount_close(), qcow2_refcount_init(),
     * qcow2_check_refcounts(), qcow2_refcount_close() and qcow2_refcount_init()
     * again. However, because the functions which could have caused this error
     * path to be taken are used by those functions as well, it's very likely
     * that that sequence will fail as well. Therefore, just eject the BDS. */
    bs->drv = NULL;

fail:
    g_free(new_reftable);
    return ret;
}
4803
Max Reitz491d27e2014-10-24 15:57:31 +02004804static int qcow2_make_empty(BlockDriverState *bs)
4805{
Kevin Wolfff991292015-09-07 17:12:56 +02004806 BDRVQcow2State *s = bs->opaque;
Eric Blaked2cb36a2017-05-06 19:05:52 -05004807 uint64_t offset, end_offset;
4808 int step = QEMU_ALIGN_DOWN(INT_MAX, s->cluster_size);
Max Reitz94054182014-10-24 15:57:32 +02004809 int l1_clusters, ret = 0;
Max Reitz491d27e2014-10-24 15:57:31 +02004810
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02004811 l1_clusters = DIV_ROUND_UP(s->l1_size, s->cluster_size / L1E_SIZE);
Max Reitz94054182014-10-24 15:57:32 +02004812
Eric Blake40969742017-11-17 10:47:47 -06004813 if (s->qcow_version >= 3 && !s->snapshots && !s->nb_bitmaps &&
Daniel P. Berrangef0603322017-11-17 11:29:13 +00004814 3 + l1_clusters <= s->refcount_block_size &&
Kevin Wolfdb045242019-04-29 12:52:21 +02004815 s->crypt_method_header != QCOW_CRYPT_LUKS &&
4816 !has_data_file(bs)) {
Eric Blake40969742017-11-17 10:47:47 -06004817 /* The following function only works for qcow2 v3 images (it
4818 * requires the dirty flag) and only as long as there are no
4819 * features that reserve extra clusters (such as snapshots,
4820 * LUKS header, or persistent bitmaps), because it completely
4821 * empties the image. Furthermore, the L1 table and three
4822 * additional clusters (image header, refcount table, one
Kevin Wolfdb045242019-04-29 12:52:21 +02004823 * refcount block) have to fit inside one refcount block. It
4824 * only resets the image file, i.e. does not work with an
4825 * external data file. */
Max Reitz94054182014-10-24 15:57:32 +02004826 return make_completely_empty(bs);
4827 }
4828
4829 /* This fallback code simply discards every active cluster; this is slow,
4830 * but works in all cases */
Eric Blaked2cb36a2017-05-06 19:05:52 -05004831 end_offset = bs->total_sectors * BDRV_SECTOR_SIZE;
4832 for (offset = 0; offset < end_offset; offset += step) {
Max Reitz491d27e2014-10-24 15:57:31 +02004833 /* As this function is generally used after committing an external
4834 * snapshot, QCOW2_DISCARD_SNAPSHOT seems appropriate. Also, the
4835 * default action for this kind of discard is to pass the discard,
4836 * which will ideally result in an actually smaller image file, as
4837 * is probably desired. */
Eric Blaked2cb36a2017-05-06 19:05:52 -05004838 ret = qcow2_cluster_discard(bs, offset, MIN(step, end_offset - offset),
4839 QCOW2_DISCARD_SNAPSHOT, true);
Max Reitz491d27e2014-10-24 15:57:31 +02004840 if (ret < 0) {
4841 break;
4842 }
4843 }
4844
4845 return ret;
4846}
4847
/*
 * .bdrv_co_flush_to_os callback: write back the qcow2 metadata caches
 * (via qcow2_write_caches()) so that subsequent flushes of the underlying
 * file make the metadata durable.
 *
 * Returns the result of qcow2_write_caches(): 0 on success, a negative
 * errno on failure.
 */
static coroutine_fn int qcow2_co_flush_to_os(BlockDriverState *bs)
{
    BDRVQcow2State *s = bs->opaque;
    int ret;

    /* s->lock serializes against concurrent metadata updates */
    qemu_co_mutex_lock(&s->lock);
    ret = qcow2_write_caches(bs);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}
4859
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004860static BlockMeasureInfo *qcow2_measure(QemuOpts *opts, BlockDriverState *in_bs,
4861 Error **errp)
4862{
4863 Error *local_err = NULL;
4864 BlockMeasureInfo *info;
4865 uint64_t required = 0; /* bytes that contribute to required size */
4866 uint64_t virtual_size; /* disk size as seen by guest */
4867 uint64_t refcount_bits;
4868 uint64_t l2_tables;
Stefan Hajnoczi61914f82019-02-18 10:45:24 +00004869 uint64_t luks_payload_size = 0;
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004870 size_t cluster_size;
4871 int version;
4872 char *optstr;
4873 PreallocMode prealloc;
4874 bool has_backing_file;
Stefan Hajnoczi61914f82019-02-18 10:45:24 +00004875 bool has_luks;
Alberto Garcia7be20252020-07-10 18:13:13 +02004876 bool extended_l2;
Alberto Garcia0dd07b22020-07-10 18:13:11 +02004877 size_t l2e_size;
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004878
4879 /* Parse image creation options */
Alberto Garcia7be20252020-07-10 18:13:13 +02004880 extended_l2 = qemu_opt_get_bool_del(opts, BLOCK_OPT_EXTL2, false);
4881
4882 cluster_size = qcow2_opt_get_cluster_size_del(opts, extended_l2,
4883 &local_err);
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004884 if (local_err) {
4885 goto err;
4886 }
4887
4888 version = qcow2_opt_get_version_del(opts, &local_err);
4889 if (local_err) {
4890 goto err;
4891 }
4892
4893 refcount_bits = qcow2_opt_get_refcount_bits_del(opts, version, &local_err);
4894 if (local_err) {
4895 goto err;
4896 }
4897
4898 optstr = qemu_opt_get_del(opts, BLOCK_OPT_PREALLOC);
Marc-André Lureauf7abe0e2017-08-24 10:46:10 +02004899 prealloc = qapi_enum_parse(&PreallocMode_lookup, optstr,
Markus Armbruster06c60b62017-08-24 10:45:57 +02004900 PREALLOC_MODE_OFF, &local_err);
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004901 g_free(optstr);
4902 if (local_err) {
4903 goto err;
4904 }
4905
4906 optstr = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
4907 has_backing_file = !!optstr;
4908 g_free(optstr);
4909
Stefan Hajnoczi61914f82019-02-18 10:45:24 +00004910 optstr = qemu_opt_get_del(opts, BLOCK_OPT_ENCRYPT_FORMAT);
4911 has_luks = optstr && strcmp(optstr, "luks") == 0;
4912 g_free(optstr);
4913
4914 if (has_luks) {
Stefan Hajnoczi6d49d3a2020-02-21 11:25:19 +00004915 g_autoptr(QCryptoBlockCreateOptions) create_opts = NULL;
Maxim Levitsky90766d92020-06-25 14:55:43 +02004916 QDict *cryptoopts = qcow2_extract_crypto_opts(opts, "luks", errp);
Stefan Hajnoczi61914f82019-02-18 10:45:24 +00004917 size_t headerlen;
4918
Stefan Hajnoczi6d49d3a2020-02-21 11:25:19 +00004919 create_opts = block_crypto_create_opts_init(cryptoopts, errp);
4920 qobject_unref(cryptoopts);
4921 if (!create_opts) {
4922 goto err;
4923 }
4924
4925 if (!qcrypto_block_calculate_payload_offset(create_opts,
4926 "encrypt.",
4927 &headerlen,
4928 &local_err)) {
Stefan Hajnoczi61914f82019-02-18 10:45:24 +00004929 goto err;
4930 }
4931
4932 luks_payload_size = ROUND_UP(headerlen, cluster_size);
4933 }
4934
Alberto Garcia9e029682018-02-15 15:10:08 +02004935 virtual_size = qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0);
4936 virtual_size = ROUND_UP(virtual_size, cluster_size);
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004937
4938 /* Check that virtual disk size is valid */
Alberto Garcia0dd07b22020-07-10 18:13:11 +02004939 l2e_size = extended_l2 ? L2E_SIZE_EXTENDED : L2E_SIZE_NORMAL;
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004940 l2_tables = DIV_ROUND_UP(virtual_size / cluster_size,
Alberto Garcia0dd07b22020-07-10 18:13:11 +02004941 cluster_size / l2e_size);
Alberto Garcia02b1ecf2020-08-28 13:08:28 +02004942 if (l2_tables * L1E_SIZE > QCOW_MAX_L1_SIZE) {
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004943 error_setg(&local_err, "The image size is too large "
4944 "(try using a larger cluster size)");
4945 goto err;
4946 }
4947
4948 /* Account for input image */
4949 if (in_bs) {
4950 int64_t ssize = bdrv_getlength(in_bs);
4951 if (ssize < 0) {
4952 error_setg_errno(&local_err, -ssize,
4953 "Unable to get image virtual_size");
4954 goto err;
4955 }
4956
Alberto Garcia9e029682018-02-15 15:10:08 +02004957 virtual_size = ROUND_UP(ssize, cluster_size);
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004958
4959 if (has_backing_file) {
4960 /* We don't how much of the backing chain is shared by the input
4961 * image and the new image file. In the worst case the new image's
4962 * backing file has nothing in common with the input image. Be
4963 * conservative and assume all clusters need to be written.
4964 */
4965 required = virtual_size;
4966 } else {
Eric Blakeb85ee452017-09-25 09:55:22 -05004967 int64_t offset;
Eric Blake31826642017-10-11 22:47:08 -05004968 int64_t pnum = 0;
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004969
Eric Blake31826642017-10-11 22:47:08 -05004970 for (offset = 0; offset < ssize; offset += pnum) {
4971 int ret;
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004972
Eric Blake31826642017-10-11 22:47:08 -05004973 ret = bdrv_block_status_above(in_bs, NULL, offset,
4974 ssize - offset, &pnum, NULL,
4975 NULL);
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004976 if (ret < 0) {
4977 error_setg_errno(&local_err, -ret,
4978 "Unable to get block status");
4979 goto err;
4980 }
4981
4982 if (ret & BDRV_BLOCK_ZERO) {
4983 /* Skip zero regions (safe with no backing file) */
4984 } else if ((ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) ==
4985 (BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED)) {
4986 /* Extend pnum to end of cluster for next iteration */
Eric Blake31826642017-10-11 22:47:08 -05004987 pnum = ROUND_UP(offset + pnum, cluster_size) - offset;
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004988
4989 /* Count clusters we've seen */
Eric Blake31826642017-10-11 22:47:08 -05004990 required += offset % cluster_size + pnum;
Stefan Hajnoczic501c352017-07-05 13:57:35 +01004991 }
4992 }
4993 }
4994 }
4995
4996 /* Take into account preallocation. Nothing special is needed for
4997 * PREALLOC_MODE_METADATA since metadata is always counted.
4998 */
4999 if (prealloc == PREALLOC_MODE_FULL || prealloc == PREALLOC_MODE_FALLOC) {
5000 required = virtual_size;
5001 }
5002
Eric Blake5d72c682020-05-21 14:21:34 -05005003 info = g_new0(BlockMeasureInfo, 1);
Alberto Garcia0dd07b22020-07-10 18:13:11 +02005004 info->fully_allocated = luks_payload_size +
Stefan Hajnoczic501c352017-07-05 13:57:35 +01005005 qcow2_calc_prealloc_size(virtual_size, cluster_size,
Alberto Garcia0dd07b22020-07-10 18:13:11 +02005006 ctz32(refcount_bits), extended_l2);
Stefan Hajnoczic501c352017-07-05 13:57:35 +01005007
Eric Blake5d72c682020-05-21 14:21:34 -05005008 /*
5009 * Remove data clusters that are not required. This overestimates the
Stefan Hajnoczic501c352017-07-05 13:57:35 +01005010 * required size because metadata needed for the fully allocated file is
Eric Blake5d72c682020-05-21 14:21:34 -05005011 * still counted. Show bitmaps only if both source and destination
5012 * would support them.
Stefan Hajnoczic501c352017-07-05 13:57:35 +01005013 */
5014 info->required = info->fully_allocated - virtual_size + required;
Eric Blake5d72c682020-05-21 14:21:34 -05005015 info->has_bitmaps = version >= 3 && in_bs &&
5016 bdrv_supports_persistent_dirty_bitmap(in_bs);
5017 if (info->has_bitmaps) {
5018 info->bitmaps = qcow2_get_persistent_dirty_bitmap_size(in_bs,
5019 cluster_size);
5020 }
Stefan Hajnoczic501c352017-07-05 13:57:35 +01005021 return info;
5022
5023err:
5024 error_propagate(errp, local_err);
5025 return NULL;
5026}
5027
Jes Sorensen7c80ab32010-12-17 16:02:39 +01005028static int qcow2_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
Blue Swirl20d97352010-04-23 20:19:47 +00005029{
Kevin Wolfff991292015-09-07 17:12:56 +02005030 BDRVQcow2State *s = bs->opaque;
Blue Swirl20d97352010-04-23 20:19:47 +00005031 bdi->cluster_size = s->cluster_size;
Jes Sorensen7c80ab32010-12-17 16:02:39 +01005032 bdi->vm_state_offset = qcow2_vm_state_offset(s);
Blue Swirl20d97352010-04-23 20:19:47 +00005033 return 0;
5034}
5035
Andrey Shinkevich1bf6e9c2019-02-08 18:06:06 +03005036static ImageInfoSpecific *qcow2_get_specific_info(BlockDriverState *bs,
5037 Error **errp)
Max Reitz37764df2013-10-09 10:46:18 +02005038{
Kevin Wolfff991292015-09-07 17:12:56 +02005039 BDRVQcow2State *s = bs->opaque;
Daniel P. Berrange0a12f6f2017-06-23 17:24:18 +01005040 ImageInfoSpecific *spec_info;
5041 QCryptoBlockInfo *encrypt_info = NULL;
Andrey Shinkevich1bf6e9c2019-02-08 18:06:06 +03005042 Error *local_err = NULL;
Max Reitz37764df2013-10-09 10:46:18 +02005043
Daniel P. Berrange0a12f6f2017-06-23 17:24:18 +01005044 if (s->crypto != NULL) {
Andrey Shinkevich1bf6e9c2019-02-08 18:06:06 +03005045 encrypt_info = qcrypto_block_get_info(s->crypto, &local_err);
5046 if (local_err) {
5047 error_propagate(errp, local_err);
5048 return NULL;
5049 }
Daniel P. Berrange0a12f6f2017-06-23 17:24:18 +01005050 }
5051
5052 spec_info = g_new(ImageInfoSpecific, 1);
Max Reitz37764df2013-10-09 10:46:18 +02005053 *spec_info = (ImageInfoSpecific){
Eric Blake6a8f9662015-10-26 16:34:54 -06005054 .type = IMAGE_INFO_SPECIFIC_KIND_QCOW2,
Andrey Shinkevichb8968c82019-02-08 18:06:07 +03005055 .u.qcow2.data = g_new0(ImageInfoSpecificQCow2, 1),
Max Reitz37764df2013-10-09 10:46:18 +02005056 };
5057 if (s->qcow_version == 2) {
Eric Blake32bafa82016-03-17 16:48:37 -06005058 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
Max Reitz0709c5a2015-02-10 15:28:44 -05005059 .compat = g_strdup("0.10"),
5060 .refcount_bits = s->refcount_bits,
Max Reitz37764df2013-10-09 10:46:18 +02005061 };
5062 } else if (s->qcow_version == 3) {
Andrey Shinkevichb8968c82019-02-08 18:06:07 +03005063 Qcow2BitmapInfoList *bitmaps;
5064 bitmaps = qcow2_get_bitmap_info_list(bs, &local_err);
5065 if (local_err) {
5066 error_propagate(errp, local_err);
5067 qapi_free_ImageInfoSpecific(spec_info);
Eric Blake71eaec22020-03-20 13:36:20 -05005068 qapi_free_QCryptoBlockInfo(encrypt_info);
Andrey Shinkevichb8968c82019-02-08 18:06:07 +03005069 return NULL;
5070 }
Eric Blake32bafa82016-03-17 16:48:37 -06005071 *spec_info->u.qcow2.data = (ImageInfoSpecificQCow2){
Max Reitz37764df2013-10-09 10:46:18 +02005072 .compat = g_strdup("1.1"),
5073 .lazy_refcounts = s->compatible_features &
5074 QCOW2_COMPAT_LAZY_REFCOUNTS,
5075 .has_lazy_refcounts = true,
Max Reitz9009b192014-09-30 21:31:28 +02005076 .corrupt = s->incompatible_features &
5077 QCOW2_INCOMPAT_CORRUPT,
5078 .has_corrupt = true,
Alberto Garcia7be20252020-07-10 18:13:13 +02005079 .has_extended_l2 = true,
5080 .extended_l2 = has_subclusters(s),
Max Reitz0709c5a2015-02-10 15:28:44 -05005081 .refcount_bits = s->refcount_bits,
Andrey Shinkevichb8968c82019-02-08 18:06:07 +03005082 .has_bitmaps = !!bitmaps,
5083 .bitmaps = bitmaps,
Kevin Wolf9b890bd2019-01-15 19:02:40 +01005084 .has_data_file = !!s->image_data_file,
5085 .data_file = g_strdup(s->image_data_file),
Kevin Wolf6c3944d2019-02-22 14:29:38 +01005086 .has_data_file_raw = has_data_file(bs),
5087 .data_file_raw = data_file_is_raw(bs),
Denis Plotnikov572ad972020-05-07 11:25:18 +03005088 .compression_type = s->compression_type,
Max Reitz37764df2013-10-09 10:46:18 +02005089 };
Denis V. Lunevb1fc8f92015-12-10 12:55:48 +03005090 } else {
5091 /* if this assertion fails, this probably means a new version was
5092 * added without having it covered here */
5093 assert(false);
Max Reitz37764df2013-10-09 10:46:18 +02005094 }
5095
Daniel P. Berrange0a12f6f2017-06-23 17:24:18 +01005096 if (encrypt_info) {
5097 ImageInfoSpecificQCow2Encryption *qencrypt =
5098 g_new(ImageInfoSpecificQCow2Encryption, 1);
5099 switch (encrypt_info->format) {
5100 case Q_CRYPTO_BLOCK_FORMAT_QCOW:
5101 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_AES;
Daniel P. Berrange0a12f6f2017-06-23 17:24:18 +01005102 break;
5103 case Q_CRYPTO_BLOCK_FORMAT_LUKS:
5104 qencrypt->format = BLOCKDEV_QCOW2_ENCRYPTION_FORMAT_LUKS;
5105 qencrypt->u.luks = encrypt_info->u.luks;
5106 break;
5107 default:
5108 abort();
5109 }
5110 /* Since we did shallow copy above, erase any pointers
5111 * in the original info */
5112 memset(&encrypt_info->u, 0, sizeof(encrypt_info->u));
5113 qapi_free_QCryptoBlockInfo(encrypt_info);
5114
5115 spec_info->u.qcow2.data->has_encrypt = true;
5116 spec_info->u.qcow2.data->encrypt = qencrypt;
5117 }
5118
Max Reitz37764df2013-10-09 10:46:18 +02005119 return spec_info;
5120}
5121
Max Reitz38841dc2019-07-24 19:12:34 +02005122static int qcow2_has_zero_init(BlockDriverState *bs)
5123{
5124 BDRVQcow2State *s = bs->opaque;
5125 bool preallocated;
5126
5127 if (qemu_in_coroutine()) {
5128 qemu_co_mutex_lock(&s->lock);
5129 }
5130 /*
5131 * Check preallocation status: Preallocated images have all L2
5132 * tables allocated, nonpreallocated images have none. It is
5133 * therefore enough to check the first one.
5134 */
5135 preallocated = s->l1_size > 0 && s->l1_table[0] != 0;
5136 if (qemu_in_coroutine()) {
5137 qemu_co_mutex_unlock(&s->lock);
5138 }
5139
5140 if (!preallocated) {
5141 return 1;
5142 } else if (bs->encrypted) {
5143 return 0;
5144 } else {
5145 return bdrv_has_zero_init(s->data_file->bs);
5146 }
5147}
5148
Kevin Wolfcf8074b2013-04-05 21:27:53 +02005149static int qcow2_save_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
5150 int64_t pos)
Blue Swirl20d97352010-04-23 20:19:47 +00005151{
Kevin Wolfff991292015-09-07 17:12:56 +02005152 BDRVQcow2State *s = bs->opaque;
Blue Swirl20d97352010-04-23 20:19:47 +00005153
Kevin Wolf66f82ce2010-04-14 14:17:38 +02005154 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_SAVE);
Vladimir Sementsov-Ogievskiy53962342019-06-04 19:15:14 +03005155 return bs->drv->bdrv_co_pwritev_part(bs, qcow2_vm_state_offset(s) + pos,
5156 qiov->size, qiov, 0, 0);
Blue Swirl20d97352010-04-23 20:19:47 +00005157}
5158
Kevin Wolf5ddda0b2016-06-09 16:50:16 +02005159static int qcow2_load_vmstate(BlockDriverState *bs, QEMUIOVector *qiov,
5160 int64_t pos)
Blue Swirl20d97352010-04-23 20:19:47 +00005161{
Kevin Wolfff991292015-09-07 17:12:56 +02005162 BDRVQcow2State *s = bs->opaque;
Blue Swirl20d97352010-04-23 20:19:47 +00005163
Kevin Wolf66f82ce2010-04-14 14:17:38 +02005164 BLKDBG_EVENT(bs->file, BLKDBG_VMSTATE_LOAD);
Vladimir Sementsov-Ogievskiydf893d22019-06-04 19:15:13 +03005165 return bs->drv->bdrv_co_preadv_part(bs, qcow2_vm_state_offset(s) + pos,
5166 qiov->size, qiov, 0, 0);
Blue Swirl20d97352010-04-23 20:19:47 +00005167}
5168
Max Reitz9296b3e2013-09-03 10:09:54 +02005169/*
5170 * Downgrades an image's version. To achieve this, any incompatible features
5171 * have to be removed.
5172 */
Max Reitz4057a2b2014-10-27 11:12:53 +01005173static int qcow2_downgrade(BlockDriverState *bs, int target_version,
Max Reitzd1402b52018-05-09 23:00:18 +02005174 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
5175 Error **errp)
Max Reitz9296b3e2013-09-03 10:09:54 +02005176{
Kevin Wolfff991292015-09-07 17:12:56 +02005177 BDRVQcow2State *s = bs->opaque;
Max Reitz9296b3e2013-09-03 10:09:54 +02005178 int current_version = s->qcow_version;
5179 int ret;
Eric Blake7fa140a2020-04-28 14:26:47 -05005180 int i;
Max Reitz9296b3e2013-09-03 10:09:54 +02005181
Max Reitzd1402b52018-05-09 23:00:18 +02005182 /* This is qcow2_downgrade(), not qcow2_upgrade() */
5183 assert(target_version < current_version);
5184
5185 /* There are no other versions (now) that you can downgrade to */
5186 assert(target_version == 2);
Max Reitz9296b3e2013-09-03 10:09:54 +02005187
5188 if (s->refcount_order != 4) {
Max Reitzd1402b52018-05-09 23:00:18 +02005189 error_setg(errp, "compat=0.10 requires refcount_bits=16");
Max Reitz9296b3e2013-09-03 10:09:54 +02005190 return -ENOTSUP;
5191 }
5192
Kevin Wolf966b0002019-01-15 20:39:06 +01005193 if (has_data_file(bs)) {
5194 error_setg(errp, "Cannot downgrade an image with a data file");
5195 return -ENOTSUP;
5196 }
5197
Eric Blake7fa140a2020-04-28 14:26:47 -05005198 /*
5199 * If any internal snapshot has a different size than the current
5200 * image size, or VM state size that exceeds 32 bits, downgrading
5201 * is unsafe. Even though we would still use v3-compliant output
5202 * to preserve that data, other v2 programs might not realize
5203 * those optional fields are important.
5204 */
5205 for (i = 0; i < s->nb_snapshots; i++) {
5206 if (s->snapshots[i].vm_state_size > UINT32_MAX ||
5207 s->snapshots[i].disk_size != bs->total_sectors * BDRV_SECTOR_SIZE) {
5208 error_setg(errp, "Internal snapshots prevent downgrade of image");
5209 return -ENOTSUP;
5210 }
5211 }
5212
Max Reitz9296b3e2013-09-03 10:09:54 +02005213 /* clear incompatible features */
5214 if (s->incompatible_features & QCOW2_INCOMPAT_DIRTY) {
5215 ret = qcow2_mark_clean(bs);
5216 if (ret < 0) {
Max Reitzd1402b52018-05-09 23:00:18 +02005217 error_setg_errno(errp, -ret, "Failed to make the image clean");
Max Reitz9296b3e2013-09-03 10:09:54 +02005218 return ret;
5219 }
5220 }
5221
5222 /* with QCOW2_INCOMPAT_CORRUPT, it is pretty much impossible to get here in
5223 * the first place; if that happens nonetheless, returning -ENOTSUP is the
5224 * best thing to do anyway */
5225
5226 if (s->incompatible_features) {
Max Reitzd1402b52018-05-09 23:00:18 +02005227 error_setg(errp, "Cannot downgrade an image with incompatible features "
5228 "%#" PRIx64 " set", s->incompatible_features);
Max Reitz9296b3e2013-09-03 10:09:54 +02005229 return -ENOTSUP;
5230 }
5231
5232 /* since we can ignore compatible features, we can set them to 0 as well */
5233 s->compatible_features = 0;
5234 /* if lazy refcounts have been used, they have already been fixed through
5235 * clearing the dirty flag */
5236
5237 /* clearing autoclear features is trivial */
5238 s->autoclear_features = 0;
5239
Max Reitz8b139762015-07-27 17:51:32 +02005240 ret = qcow2_expand_zero_clusters(bs, status_cb, cb_opaque);
Max Reitz9296b3e2013-09-03 10:09:54 +02005241 if (ret < 0) {
Max Reitzd1402b52018-05-09 23:00:18 +02005242 error_setg_errno(errp, -ret, "Failed to turn zero into data clusters");
Max Reitz9296b3e2013-09-03 10:09:54 +02005243 return ret;
5244 }
5245
5246 s->qcow_version = target_version;
5247 ret = qcow2_update_header(bs);
5248 if (ret < 0) {
5249 s->qcow_version = current_version;
Max Reitzd1402b52018-05-09 23:00:18 +02005250 error_setg_errno(errp, -ret, "Failed to update the image header");
Max Reitz9296b3e2013-09-03 10:09:54 +02005251 return ret;
5252 }
5253 return 0;
5254}
5255
Max Reitz722efb02019-10-11 17:28:04 +02005256/*
5257 * Upgrades an image's version. While newer versions encompass all
5258 * features of older versions, some things may have to be presented
5259 * differently.
5260 */
5261static int qcow2_upgrade(BlockDriverState *bs, int target_version,
5262 BlockDriverAmendStatusCB *status_cb, void *cb_opaque,
5263 Error **errp)
5264{
5265 BDRVQcow2State *s = bs->opaque;
Max Reitz0a85af32019-10-11 17:28:05 +02005266 bool need_snapshot_update;
Max Reitz722efb02019-10-11 17:28:04 +02005267 int current_version = s->qcow_version;
Max Reitz0a85af32019-10-11 17:28:05 +02005268 int i;
Max Reitz722efb02019-10-11 17:28:04 +02005269 int ret;
5270
5271 /* This is qcow2_upgrade(), not qcow2_downgrade() */
5272 assert(target_version > current_version);
5273
5274 /* There are no other versions (yet) that you can upgrade to */
5275 assert(target_version == 3);
5276
Max Reitz0a85af32019-10-11 17:28:05 +02005277 status_cb(bs, 0, 2, cb_opaque);
5278
5279 /*
5280 * In v2, snapshots do not need to have extra data. v3 requires
5281 * the 64-bit VM state size and the virtual disk size to be
5282 * present.
5283 * qcow2_write_snapshots() will always write the list in the
5284 * v3-compliant format.
5285 */
5286 need_snapshot_update = false;
5287 for (i = 0; i < s->nb_snapshots; i++) {
5288 if (s->snapshots[i].extra_data_size <
5289 sizeof_field(QCowSnapshotExtraData, vm_state_size_large) +
5290 sizeof_field(QCowSnapshotExtraData, disk_size))
5291 {
5292 need_snapshot_update = true;
5293 break;
5294 }
5295 }
5296 if (need_snapshot_update) {
5297 ret = qcow2_write_snapshots(bs);
5298 if (ret < 0) {
5299 error_setg_errno(errp, -ret, "Failed to update the snapshot table");
5300 return ret;
5301 }
5302 }
5303 status_cb(bs, 1, 2, cb_opaque);
Max Reitz722efb02019-10-11 17:28:04 +02005304
5305 s->qcow_version = target_version;
5306 ret = qcow2_update_header(bs);
5307 if (ret < 0) {
5308 s->qcow_version = current_version;
5309 error_setg_errno(errp, -ret, "Failed to update the image header");
5310 return ret;
5311 }
Max Reitz0a85af32019-10-11 17:28:05 +02005312 status_cb(bs, 2, 2, cb_opaque);
Max Reitz722efb02019-10-11 17:28:04 +02005313
5314 return 0;
5315}
5316
Max Reitzc293a802015-07-27 17:51:36 +02005317typedef enum Qcow2AmendOperation {
5318 /* This is the value Qcow2AmendHelperCBInfo::last_operation will be
5319 * statically initialized to so that the helper CB can discern the first
5320 * invocation from an operation change */
5321 QCOW2_NO_OPERATION = 0,
5322
Max Reitz722efb02019-10-11 17:28:04 +02005323 QCOW2_UPGRADING,
Maxim Levitsky90766d92020-06-25 14:55:43 +02005324 QCOW2_UPDATING_ENCRYPTION,
Max Reitz61ce55f2015-07-27 17:51:38 +02005325 QCOW2_CHANGING_REFCOUNT_ORDER,
Max Reitzc293a802015-07-27 17:51:36 +02005326 QCOW2_DOWNGRADING,
5327} Qcow2AmendOperation;
5328
/*
 * State used by qcow2_amend_helper_cb() to merge the per-operation progress
 * reports of several amend sub-operations into one overall progress report
 * for the caller-supplied status callback.
 */
typedef struct Qcow2AmendHelperCBInfo {
    /* The code coordinating the amend operations should only modify
     * these four fields; the rest will be managed by the CB */
    BlockDriverAmendStatusCB *original_status_cb;
    void *original_cb_opaque;

    Qcow2AmendOperation current_operation;

    /* Total number of operations to perform (only set once) */
    int total_operations;

    /* The following fields are managed by the CB */

    /* Number of operations completed */
    int operations_completed;

    /* Cumulative offset of all completed operations */
    int64_t offset_completed;

    /* Operation seen on the previous invocation; detects transitions */
    Qcow2AmendOperation last_operation;
    /* Work size most recently reported for the current operation */
    int64_t last_work_size;
} Qcow2AmendHelperCBInfo;
5351
5352static void qcow2_amend_helper_cb(BlockDriverState *bs,
5353 int64_t operation_offset,
5354 int64_t operation_work_size, void *opaque)
5355{
5356 Qcow2AmendHelperCBInfo *info = opaque;
5357 int64_t current_work_size;
5358 int64_t projected_work_size;
5359
5360 if (info->current_operation != info->last_operation) {
5361 if (info->last_operation != QCOW2_NO_OPERATION) {
5362 info->offset_completed += info->last_work_size;
5363 info->operations_completed++;
5364 }
5365
5366 info->last_operation = info->current_operation;
5367 }
5368
5369 assert(info->total_operations > 0);
5370 assert(info->operations_completed < info->total_operations);
5371
5372 info->last_work_size = operation_work_size;
5373
5374 current_work_size = info->offset_completed + operation_work_size;
5375
5376 /* current_work_size is the total work size for (operations_completed + 1)
5377 * operations (which includes this one), so multiply it by the number of
5378 * operations not covered and divide it by the number of operations
5379 * covered to get a projection for the operations not covered */
5380 projected_work_size = current_work_size * (info->total_operations -
5381 info->operations_completed - 1)
5382 / (info->operations_completed + 1);
5383
5384 info->original_status_cb(bs, info->offset_completed + operation_offset,
5385 current_work_size + projected_work_size,
5386 info->original_cb_opaque);
5387}
5388
Max Reitz77485432014-10-27 11:12:50 +01005389static int qcow2_amend_options(BlockDriverState *bs, QemuOpts *opts,
Max Reitz8b139762015-07-27 17:51:32 +02005390 BlockDriverAmendStatusCB *status_cb,
Max Reitzd1402b52018-05-09 23:00:18 +02005391 void *cb_opaque,
Maxim Levitskya3579bf2020-06-25 14:55:38 +02005392 bool force,
Max Reitzd1402b52018-05-09 23:00:18 +02005393 Error **errp)
Max Reitz9296b3e2013-09-03 10:09:54 +02005394{
Kevin Wolfff991292015-09-07 17:12:56 +02005395 BDRVQcow2State *s = bs->opaque;
Max Reitz9296b3e2013-09-03 10:09:54 +02005396 int old_version = s->qcow_version, new_version = old_version;
5397 uint64_t new_size = 0;
Kevin Wolf9b890bd2019-01-15 19:02:40 +01005398 const char *backing_file = NULL, *backing_format = NULL, *data_file = NULL;
Max Reitz9296b3e2013-09-03 10:09:54 +02005399 bool lazy_refcounts = s->use_lazy_refcounts;
Kevin Wolf6c3944d2019-02-22 14:29:38 +01005400 bool data_file_raw = data_file_is_raw(bs);
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005401 const char *compat = NULL;
Max Reitz61ce55f2015-07-27 17:51:38 +02005402 int refcount_bits = s->refcount_bits;
Max Reitz9296b3e2013-09-03 10:09:54 +02005403 int ret;
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005404 QemuOptDesc *desc = opts->list->desc;
Max Reitzc293a802015-07-27 17:51:36 +02005405 Qcow2AmendHelperCBInfo helper_cb_info;
Maxim Levitsky90766d92020-06-25 14:55:43 +02005406 bool encryption_update = false;
Max Reitz9296b3e2013-09-03 10:09:54 +02005407
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005408 while (desc && desc->name) {
5409 if (!qemu_opt_find(opts, desc->name)) {
Max Reitz9296b3e2013-09-03 10:09:54 +02005410 /* only change explicitly defined options */
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005411 desc++;
Max Reitz9296b3e2013-09-03 10:09:54 +02005412 continue;
5413 }
5414
Max Reitz8a17b832015-02-18 17:40:47 -05005415 if (!strcmp(desc->name, BLOCK_OPT_COMPAT_LEVEL)) {
5416 compat = qemu_opt_get(opts, BLOCK_OPT_COMPAT_LEVEL);
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005417 if (!compat) {
Max Reitz9296b3e2013-09-03 10:09:54 +02005418 /* preserve default */
Eric Blakef7077c92019-07-05 10:28:12 -05005419 } else if (!strcmp(compat, "0.10") || !strcmp(compat, "v2")) {
Max Reitz9296b3e2013-09-03 10:09:54 +02005420 new_version = 2;
Eric Blakef7077c92019-07-05 10:28:12 -05005421 } else if (!strcmp(compat, "1.1") || !strcmp(compat, "v3")) {
Max Reitz9296b3e2013-09-03 10:09:54 +02005422 new_version = 3;
5423 } else {
Max Reitzd1402b52018-05-09 23:00:18 +02005424 error_setg(errp, "Unknown compatibility level %s", compat);
Max Reitz9296b3e2013-09-03 10:09:54 +02005425 return -EINVAL;
5426 }
Max Reitz8a17b832015-02-18 17:40:47 -05005427 } else if (!strcmp(desc->name, BLOCK_OPT_SIZE)) {
5428 new_size = qemu_opt_get_size(opts, BLOCK_OPT_SIZE, 0);
5429 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FILE)) {
5430 backing_file = qemu_opt_get(opts, BLOCK_OPT_BACKING_FILE);
5431 } else if (!strcmp(desc->name, BLOCK_OPT_BACKING_FMT)) {
5432 backing_format = qemu_opt_get(opts, BLOCK_OPT_BACKING_FMT);
Maxim Levitsky90766d92020-06-25 14:55:43 +02005433 } else if (g_str_has_prefix(desc->name, "encrypt.")) {
5434 if (!s->crypto) {
5435 error_setg(errp,
5436 "Can't amend encryption options - encryption not present");
5437 return -EINVAL;
5438 }
5439 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
5440 error_setg(errp,
5441 "Only LUKS encryption options can be amended");
5442 return -ENOTSUP;
5443 }
5444 encryption_update = true;
Max Reitz8a17b832015-02-18 17:40:47 -05005445 } else if (!strcmp(desc->name, BLOCK_OPT_LAZY_REFCOUNTS)) {
5446 lazy_refcounts = qemu_opt_get_bool(opts, BLOCK_OPT_LAZY_REFCOUNTS,
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005447 lazy_refcounts);
Max Reitz06d05fa2015-02-18 17:40:49 -05005448 } else if (!strcmp(desc->name, BLOCK_OPT_REFCOUNT_BITS)) {
Max Reitz61ce55f2015-07-27 17:51:38 +02005449 refcount_bits = qemu_opt_get_number(opts, BLOCK_OPT_REFCOUNT_BITS,
5450 refcount_bits);
5451
5452 if (refcount_bits <= 0 || refcount_bits > 64 ||
5453 !is_power_of_2(refcount_bits))
5454 {
Max Reitzd1402b52018-05-09 23:00:18 +02005455 error_setg(errp, "Refcount width must be a power of two and "
5456 "may not exceed 64 bits");
Max Reitz61ce55f2015-07-27 17:51:38 +02005457 return -EINVAL;
5458 }
Kevin Wolf9b890bd2019-01-15 19:02:40 +01005459 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE)) {
5460 data_file = qemu_opt_get(opts, BLOCK_OPT_DATA_FILE);
5461 if (data_file && !has_data_file(bs)) {
5462 error_setg(errp, "data-file can only be set for images that "
5463 "use an external data file");
5464 return -EINVAL;
5465 }
Kevin Wolf6c3944d2019-02-22 14:29:38 +01005466 } else if (!strcmp(desc->name, BLOCK_OPT_DATA_FILE_RAW)) {
5467 data_file_raw = qemu_opt_get_bool(opts, BLOCK_OPT_DATA_FILE_RAW,
5468 data_file_raw);
5469 if (data_file_raw && !data_file_is_raw(bs)) {
5470 error_setg(errp, "data-file-raw cannot be set on existing "
5471 "images");
5472 return -EINVAL;
5473 }
Max Reitz9296b3e2013-09-03 10:09:54 +02005474 } else {
Max Reitz164e0f82015-07-27 17:51:34 +02005475 /* if this point is reached, this probably means a new option was
Max Reitz9296b3e2013-09-03 10:09:54 +02005476 * added without having it covered here */
Max Reitz164e0f82015-07-27 17:51:34 +02005477 abort();
Max Reitz9296b3e2013-09-03 10:09:54 +02005478 }
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005479
5480 desc++;
Max Reitz9296b3e2013-09-03 10:09:54 +02005481 }
5482
Max Reitzc293a802015-07-27 17:51:36 +02005483 helper_cb_info = (Qcow2AmendHelperCBInfo){
5484 .original_status_cb = status_cb,
5485 .original_cb_opaque = cb_opaque,
Max Reitz722efb02019-10-11 17:28:04 +02005486 .total_operations = (new_version != old_version)
Maxim Levitsky90766d92020-06-25 14:55:43 +02005487 + (s->refcount_bits != refcount_bits) +
5488 (encryption_update == true)
Max Reitzc293a802015-07-27 17:51:36 +02005489 };
5490
Max Reitz1038bbb2015-07-27 17:51:35 +02005491 /* Upgrade first (some features may require compat=1.1) */
5492 if (new_version > old_version) {
Max Reitz722efb02019-10-11 17:28:04 +02005493 helper_cb_info.current_operation = QCOW2_UPGRADING;
5494 ret = qcow2_upgrade(bs, new_version, &qcow2_amend_helper_cb,
5495 &helper_cb_info, errp);
Max Reitz1038bbb2015-07-27 17:51:35 +02005496 if (ret < 0) {
Max Reitz1038bbb2015-07-27 17:51:35 +02005497 return ret;
Max Reitz9296b3e2013-09-03 10:09:54 +02005498 }
5499 }
5500
Maxim Levitsky90766d92020-06-25 14:55:43 +02005501 if (encryption_update) {
5502 QDict *amend_opts_dict;
5503 QCryptoBlockAmendOptions *amend_opts;
5504
5505 helper_cb_info.current_operation = QCOW2_UPDATING_ENCRYPTION;
5506 amend_opts_dict = qcow2_extract_crypto_opts(opts, "luks", errp);
5507 if (!amend_opts_dict) {
5508 return -EINVAL;
5509 }
5510 amend_opts = block_crypto_amend_opts_init(amend_opts_dict, errp);
5511 qobject_unref(amend_opts_dict);
5512 if (!amend_opts) {
5513 return -EINVAL;
5514 }
5515 ret = qcrypto_block_amend_options(s->crypto,
5516 qcow2_crypto_hdr_read_func,
5517 qcow2_crypto_hdr_write_func,
5518 bs,
5519 amend_opts,
5520 force,
5521 errp);
5522 qapi_free_QCryptoBlockAmendOptions(amend_opts);
5523 if (ret < 0) {
5524 return ret;
5525 }
5526 }
5527
Max Reitz61ce55f2015-07-27 17:51:38 +02005528 if (s->refcount_bits != refcount_bits) {
5529 int refcount_order = ctz32(refcount_bits);
Max Reitz61ce55f2015-07-27 17:51:38 +02005530
5531 if (new_version < 3 && refcount_bits != 16) {
Max Reitzd1402b52018-05-09 23:00:18 +02005532 error_setg(errp, "Refcount widths other than 16 bits require "
5533 "compatibility level 1.1 or above (use compat=1.1 or "
5534 "greater)");
Max Reitz61ce55f2015-07-27 17:51:38 +02005535 return -EINVAL;
5536 }
5537
5538 helper_cb_info.current_operation = QCOW2_CHANGING_REFCOUNT_ORDER;
5539 ret = qcow2_change_refcount_order(bs, refcount_order,
5540 &qcow2_amend_helper_cb,
Max Reitzd1402b52018-05-09 23:00:18 +02005541 &helper_cb_info, errp);
Max Reitz61ce55f2015-07-27 17:51:38 +02005542 if (ret < 0) {
Max Reitz61ce55f2015-07-27 17:51:38 +02005543 return ret;
5544 }
5545 }
5546
Kevin Wolf6c3944d2019-02-22 14:29:38 +01005547 /* data-file-raw blocks backing files, so clear it first if requested */
5548 if (data_file_raw) {
5549 s->autoclear_features |= QCOW2_AUTOCLEAR_DATA_FILE_RAW;
5550 } else {
5551 s->autoclear_features &= ~QCOW2_AUTOCLEAR_DATA_FILE_RAW;
5552 }
5553
Kevin Wolf9b890bd2019-01-15 19:02:40 +01005554 if (data_file) {
5555 g_free(s->image_data_file);
5556 s->image_data_file = *data_file ? g_strdup(data_file) : NULL;
5557 }
5558
5559 ret = qcow2_update_header(bs);
5560 if (ret < 0) {
5561 error_setg_errno(errp, -ret, "Failed to update the image header");
5562 return ret;
5563 }
5564
Max Reitz9296b3e2013-09-03 10:09:54 +02005565 if (backing_file || backing_format) {
Eric Blakebc5ee6d2020-07-06 15:39:51 -05005566 if (g_strcmp0(backing_file, s->image_backing_file) ||
5567 g_strcmp0(backing_format, s->image_backing_format)) {
5568 warn_report("Deprecated use of amend to alter the backing file; "
5569 "use qemu-img rebase instead");
5570 }
Kevin Wolfe4603fe2015-04-07 15:03:16 +02005571 ret = qcow2_change_backing_file(bs,
5572 backing_file ?: s->image_backing_file,
5573 backing_format ?: s->image_backing_format);
Max Reitz9296b3e2013-09-03 10:09:54 +02005574 if (ret < 0) {
Max Reitzd1402b52018-05-09 23:00:18 +02005575 error_setg_errno(errp, -ret, "Failed to change the backing file");
Max Reitz9296b3e2013-09-03 10:09:54 +02005576 return ret;
5577 }
5578 }
5579
5580 if (s->use_lazy_refcounts != lazy_refcounts) {
5581 if (lazy_refcounts) {
Max Reitz1038bbb2015-07-27 17:51:35 +02005582 if (new_version < 3) {
Max Reitzd1402b52018-05-09 23:00:18 +02005583 error_setg(errp, "Lazy refcounts only supported with "
5584 "compatibility level 1.1 and above (use compat=1.1 "
5585 "or greater)");
Max Reitz9296b3e2013-09-03 10:09:54 +02005586 return -EINVAL;
5587 }
5588 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
5589 ret = qcow2_update_header(bs);
5590 if (ret < 0) {
5591 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
Max Reitzd1402b52018-05-09 23:00:18 +02005592 error_setg_errno(errp, -ret, "Failed to update the image header");
Max Reitz9296b3e2013-09-03 10:09:54 +02005593 return ret;
5594 }
5595 s->use_lazy_refcounts = true;
5596 } else {
5597 /* make image clean first */
5598 ret = qcow2_mark_clean(bs);
5599 if (ret < 0) {
Max Reitzd1402b52018-05-09 23:00:18 +02005600 error_setg_errno(errp, -ret, "Failed to make the image clean");
Max Reitz9296b3e2013-09-03 10:09:54 +02005601 return ret;
5602 }
5603 /* now disallow lazy refcounts */
5604 s->compatible_features &= ~QCOW2_COMPAT_LAZY_REFCOUNTS;
5605 ret = qcow2_update_header(bs);
5606 if (ret < 0) {
5607 s->compatible_features |= QCOW2_COMPAT_LAZY_REFCOUNTS;
Max Reitzd1402b52018-05-09 23:00:18 +02005608 error_setg_errno(errp, -ret, "Failed to update the image header");
Max Reitz9296b3e2013-09-03 10:09:54 +02005609 return ret;
5610 }
5611 s->use_lazy_refcounts = false;
5612 }
5613 }
5614
5615 if (new_size) {
Eric Blakea3aeeab2020-04-28 14:26:46 -05005616 BlockBackend *blk = blk_new_with_bs(bs, BLK_PERM_RESIZE, BLK_PERM_ALL,
5617 errp);
5618 if (!blk) {
5619 return -EPERM;
Kevin Wolfd7086422017-01-13 19:02:32 +01005620 }
5621
Max Reitze8d04f92019-09-18 11:51:43 +02005622 /*
5623 * Amending image options should ensure that the image has
5624 * exactly the given new values, so pass exact=true here.
5625 */
Kevin Wolf8c6242b2020-04-24 14:54:41 +02005626 ret = blk_truncate(blk, new_size, true, PREALLOC_MODE_OFF, 0, errp);
Kevin Wolf70b27f32017-02-17 10:58:25 +01005627 blk_unref(blk);
Max Reitz9296b3e2013-09-03 10:09:54 +02005628 if (ret < 0) {
5629 return ret;
5630 }
5631 }
5632
Max Reitz1038bbb2015-07-27 17:51:35 +02005633 /* Downgrade last (so unsupported features can be removed before) */
5634 if (new_version < old_version) {
Max Reitzc293a802015-07-27 17:51:36 +02005635 helper_cb_info.current_operation = QCOW2_DOWNGRADING;
5636 ret = qcow2_downgrade(bs, new_version, &qcow2_amend_helper_cb,
Max Reitzd1402b52018-05-09 23:00:18 +02005637 &helper_cb_info, errp);
Max Reitz1038bbb2015-07-27 17:51:35 +02005638 if (ret < 0) {
5639 return ret;
5640 }
5641 }
5642
Max Reitz9296b3e2013-09-03 10:09:54 +02005643 return 0;
5644}
5645
Maxim Levitsky8ea16132020-06-25 14:55:47 +02005646static int coroutine_fn qcow2_co_amend(BlockDriverState *bs,
5647 BlockdevAmendOptions *opts,
5648 bool force,
5649 Error **errp)
5650{
5651 BlockdevAmendOptionsQcow2 *qopts = &opts->u.qcow2;
5652 BDRVQcow2State *s = bs->opaque;
5653 int ret = 0;
5654
5655 if (qopts->has_encrypt) {
5656 if (!s->crypto) {
5657 error_setg(errp, "image is not encrypted, can't amend");
5658 return -EOPNOTSUPP;
5659 }
5660
5661 if (qopts->encrypt->format != Q_CRYPTO_BLOCK_FORMAT_LUKS) {
5662 error_setg(errp,
5663 "Amend can't be used to change the qcow2 encryption format");
5664 return -EOPNOTSUPP;
5665 }
5666
5667 if (s->crypt_method_header != QCOW_CRYPT_LUKS) {
5668 error_setg(errp,
5669 "Only LUKS encryption options can be amended for qcow2 with blockdev-amend");
5670 return -EOPNOTSUPP;
5671 }
5672
5673 ret = qcrypto_block_amend_options(s->crypto,
5674 qcow2_crypto_hdr_read_func,
5675 qcow2_crypto_hdr_write_func,
5676 bs,
5677 qopts->encrypt,
5678 force,
5679 errp);
5680 }
5681 return ret;
5682}
5683
Max Reitz85186eb2014-09-05 16:07:16 +02005684/*
5685 * If offset or size are negative, respectively, they will not be included in
5686 * the BLOCK_IMAGE_CORRUPTED event emitted.
5687 * fatal will be ignored for read-only BDS; corruptions found there will always
5688 * be considered non-fatal.
5689 */
5690void qcow2_signal_corruption(BlockDriverState *bs, bool fatal, int64_t offset,
5691 int64_t size, const char *message_format, ...)
5692{
Kevin Wolfff991292015-09-07 17:12:56 +02005693 BDRVQcow2State *s = bs->opaque;
Alberto Garciadc881b42015-04-08 12:29:20 +03005694 const char *node_name;
Max Reitz85186eb2014-09-05 16:07:16 +02005695 char *message;
5696 va_list ap;
5697
Max Reitzddf3b472018-06-06 21:37:01 +02005698 fatal = fatal && bdrv_is_writable(bs);
Max Reitz85186eb2014-09-05 16:07:16 +02005699
5700 if (s->signaled_corruption &&
5701 (!fatal || (s->incompatible_features & QCOW2_INCOMPAT_CORRUPT)))
5702 {
5703 return;
5704 }
5705
5706 va_start(ap, message_format);
5707 message = g_strdup_vprintf(message_format, ap);
5708 va_end(ap);
5709
5710 if (fatal) {
5711 fprintf(stderr, "qcow2: Marking image as corrupt: %s; further "
5712 "corruption events will be suppressed\n", message);
5713 } else {
5714 fprintf(stderr, "qcow2: Image is corrupt: %s; further non-fatal "
5715 "corruption events will be suppressed\n", message);
5716 }
5717
Alberto Garciadc881b42015-04-08 12:29:20 +03005718 node_name = bdrv_get_node_name(bs);
5719 qapi_event_send_block_image_corrupted(bdrv_get_device_name(bs),
5720 *node_name != '\0', node_name,
5721 message, offset >= 0, offset,
5722 size >= 0, size,
Peter Xu3ab72382018-08-15 21:37:37 +08005723 fatal);
Max Reitz85186eb2014-09-05 16:07:16 +02005724 g_free(message);
5725
5726 if (fatal) {
5727 qcow2_mark_corrupt(bs);
5728 bs->drv = NULL; /* make BDS unusable */
5729 }
5730
5731 s->signaled_corruption = true;
5732}
5733
/*
 * QemuOpts entries shared between the creation options
 * (qcow2_create_opts) and the amend options (qcow2_amend_opts).
 * Expands to a comma-separated list of QemuOptDesc initializers,
 * to be embedded inside a .desc = { ... } array.
 */
#define QCOW_COMMON_OPTIONS                                         \
    {                                                               \
        .name = BLOCK_OPT_SIZE,                                     \
        .type = QEMU_OPT_SIZE,                                      \
        .help = "Virtual disk size"                                 \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_COMPAT_LEVEL,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Compatibility level (v2 [0.10] or v3 [1.1])"       \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FILE,                             \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of a base image"                         \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_BACKING_FMT,                              \
        .type = QEMU_OPT_STRING,                                    \
        .help = "Image format of the base image"                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE,                                \
        .type = QEMU_OPT_STRING,                                    \
        .help = "File name of an external data file"                \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_DATA_FILE_RAW,                            \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "The external data file must stay valid "           \
                "as a raw image"                                    \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_LAZY_REFCOUNTS,                           \
        .type = QEMU_OPT_BOOL,                                      \
        .help = "Postpone refcount updates",                        \
        .def_value_str = "off"                                      \
    },                                                              \
    {                                                               \
        .name = BLOCK_OPT_REFCOUNT_BITS,                            \
        .type = QEMU_OPT_NUMBER,                                    \
        .help = "Width of a reference count entry in bits",         \
        .def_value_str = "16"                                       \
    }
5778
Chunyan Liu1bd0e2d2014-06-05 17:20:59 +08005779static QemuOptsList qcow2_create_opts = {
5780 .name = "qcow2-create-opts",
5781 .head = QTAILQ_HEAD_INITIALIZER(qcow2_create_opts.head),
5782 .desc = {
Maxim Levitsky0b6786a2020-06-25 14:55:40 +02005783 { \
5784 .name = BLOCK_OPT_ENCRYPT, \
5785 .type = QEMU_OPT_BOOL, \
5786 .help = "Encrypt the image with format 'aes'. (Deprecated " \
5787 "in favor of " BLOCK_OPT_ENCRYPT_FORMAT "=aes)", \
5788 }, \
5789 { \
5790 .name = BLOCK_OPT_ENCRYPT_FORMAT, \
5791 .type = QEMU_OPT_STRING, \
5792 .help = "Encrypt the image, format choices: 'aes', 'luks'", \
5793 }, \
5794 BLOCK_CRYPTO_OPT_DEF_KEY_SECRET("encrypt.", \
5795 "ID of secret providing qcow AES key or LUKS passphrase"), \
5796 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_ALG("encrypt."), \
5797 BLOCK_CRYPTO_OPT_DEF_LUKS_CIPHER_MODE("encrypt."), \
5798 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_ALG("encrypt."), \
5799 BLOCK_CRYPTO_OPT_DEF_LUKS_IVGEN_HASH_ALG("encrypt."), \
5800 BLOCK_CRYPTO_OPT_DEF_LUKS_HASH_ALG("encrypt."), \
5801 BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."), \
5802 { \
5803 .name = BLOCK_OPT_CLUSTER_SIZE, \
5804 .type = QEMU_OPT_SIZE, \
5805 .help = "qcow2 cluster size", \
5806 .def_value_str = stringify(DEFAULT_CLUSTER_SIZE) \
5807 }, \
5808 { \
Alberto Garcia7be20252020-07-10 18:13:13 +02005809 .name = BLOCK_OPT_EXTL2, \
5810 .type = QEMU_OPT_BOOL, \
5811 .help = "Extended L2 tables", \
5812 .def_value_str = "off" \
5813 }, \
5814 { \
Maxim Levitsky0b6786a2020-06-25 14:55:40 +02005815 .name = BLOCK_OPT_PREALLOC, \
5816 .type = QEMU_OPT_STRING, \
5817 .help = "Preallocation mode (allowed values: off, " \
5818 "metadata, falloc, full)" \
5819 }, \
5820 { \
5821 .name = BLOCK_OPT_COMPRESSION_TYPE, \
5822 .type = QEMU_OPT_STRING, \
5823 .help = "Compression method used for image cluster " \
5824 "compression", \
5825 .def_value_str = "zlib" \
5826 },
Maxim Levitskydf373fb2020-06-25 14:55:39 +02005827 QCOW_COMMON_OPTIONS,
5828 { /* end of list */ }
5829 }
5830};
5831
/*
 * Options accepted by qcow2_amend_options(): the LUKS keyslot
 * management entries (state, keyslot, old/new secret, iteration time)
 * plus the entries shared with creation (QCOW_COMMON_OPTIONS).
 */
static QemuOptsList qcow2_amend_opts = {
    .name = "qcow2-amend-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qcow2_amend_opts.head),
    .desc = {
        BLOCK_CRYPTO_OPT_DEF_LUKS_STATE("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_KEYSLOT("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_OLD_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_NEW_SECRET("encrypt."),
        BLOCK_CRYPTO_OPT_DEF_LUKS_ITER_TIME("encrypt."),
        QCOW_COMMON_OPTIONS,
        { /* end of list */ }
    }
};
5845
/*
 * Runtime options that influence the data this node presents (here:
 * the secret providing the encryption key).  Referenced from
 * bdrv_qcow2.strong_runtime_opts below; NOTE(review): consumed by the
 * generic block layer's filename/option handling — confirm exact
 * semantics in block.c.
 */
static const char *const qcow2_strong_runtime_opts[] = {
    "encrypt." BLOCK_CRYPTO_OPT_QCOW_KEY_SECRET,

    NULL
};
5851
/*
 * Driver table for the qcow2 format: maps the generic BlockDriver
 * callbacks onto the qcow2_* implementations in this file.
 */
BlockDriver bdrv_qcow2 = {
    .format_name        = "qcow2",
    .instance_size      = sizeof(BDRVQcow2State),
    .bdrv_probe         = qcow2_probe,
    .bdrv_open          = qcow2_open,
    .bdrv_close         = qcow2_close,
    .bdrv_reopen_prepare  = qcow2_reopen_prepare,
    .bdrv_reopen_commit   = qcow2_reopen_commit,
    .bdrv_reopen_commit_post = qcow2_reopen_commit_post,
    .bdrv_reopen_abort    = qcow2_reopen_abort,
    .bdrv_join_options    = qcow2_join_options,
    .bdrv_child_perm      = bdrv_default_perms,
    .bdrv_co_create_opts  = qcow2_co_create_opts,
    .bdrv_co_create       = qcow2_co_create,
    .bdrv_has_zero_init   = qcow2_has_zero_init,
    .bdrv_co_block_status = qcow2_co_block_status,

    /* Guest data I/O and flushing */
    .bdrv_co_preadv_part    = qcow2_co_preadv_part,
    .bdrv_co_pwritev_part   = qcow2_co_pwritev_part,
    .bdrv_co_flush_to_os    = qcow2_co_flush_to_os,

    /* Zero-write, discard, copy offloading, resize, compression */
    .bdrv_co_pwrite_zeroes  = qcow2_co_pwrite_zeroes,
    .bdrv_co_pdiscard       = qcow2_co_pdiscard,
    .bdrv_co_copy_range_from = qcow2_co_copy_range_from,
    .bdrv_co_copy_range_to  = qcow2_co_copy_range_to,
    .bdrv_co_truncate       = qcow2_co_truncate,
    .bdrv_co_pwritev_compressed_part = qcow2_co_pwritev_compressed_part,
    .bdrv_make_empty        = qcow2_make_empty,

    /* Internal snapshots */
    .bdrv_snapshot_create   = qcow2_snapshot_create,
    .bdrv_snapshot_goto     = qcow2_snapshot_goto,
    .bdrv_snapshot_delete   = qcow2_snapshot_delete,
    .bdrv_snapshot_list     = qcow2_snapshot_list,
    .bdrv_snapshot_load_tmp = qcow2_snapshot_load_tmp,
    .bdrv_measure           = qcow2_measure,
    .bdrv_get_info          = qcow2_get_info,
    .bdrv_get_specific_info = qcow2_get_specific_info,

    /* VM state is stored inside the image file */
    .bdrv_save_vmstate    = qcow2_save_vmstate,
    .bdrv_load_vmstate    = qcow2_load_vmstate,

    .is_format            = true,
    .supports_backing     = true,
    .bdrv_change_backing_file   = qcow2_change_backing_file,

    .bdrv_refresh_limits        = qcow2_refresh_limits,
    .bdrv_co_invalidate_cache   = qcow2_co_invalidate_cache,
    .bdrv_inactivate            = qcow2_inactivate,

    /* Creation / amend option tables defined above */
    .create_opts         = &qcow2_create_opts,
    .amend_opts          = &qcow2_amend_opts,
    .strong_runtime_opts = qcow2_strong_runtime_opts,
    .mutable_opts        = mutable_opts,
    .bdrv_co_check       = qcow2_co_check,
    .bdrv_amend_options  = qcow2_amend_options,
    .bdrv_co_amend       = qcow2_co_amend,

    .bdrv_detach_aio_context = qcow2_detach_aio_context,
    .bdrv_attach_aio_context = qcow2_attach_aio_context,

    /* Persistent dirty bitmap support */
    .bdrv_supports_persistent_dirty_bitmap =
        qcow2_supports_persistent_dirty_bitmap,
    .bdrv_co_can_store_new_dirty_bitmap = qcow2_co_can_store_new_dirty_bitmap,
    .bdrv_co_remove_persistent_dirty_bitmap =
        qcow2_co_remove_persistent_dirty_bitmap,
};
5918
/* Register the qcow2 driver with the block layer at module-init time. */
static void bdrv_qcow2_init(void)
{
    bdrv_register(&bdrv_qcow2);
}

block_init(bdrv_qcow2_init);