/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/memalign.h"
#include "dmg.h"

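/*
 * Optional decompression hooks: the dmg-bz2 and dmg-lzfse block modules
 * fill in these pointers when dmg_open() loads them; they stay NULL when
 * the modules are absent. Judging from the call sites in dmg_read_chunk()
 * below, the callback type declared in dmg.h is expected to look roughly
 * like this (a sketch, not the authoritative definition):
 *
 *   typedef int BdrvDmgUncompressFunc(char *next_in, unsigned int avail_in,
 *                                     char *next_out, unsigned int avail_out);
 *
 * with a negative return value signalling failure.
 */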
BdrvDmgUncompressFunc *dmg_uncompress_bz2;
BdrvDmgUncompressFunc *dmg_uncompress_lzfse;

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

enum {
    /* DMG Block Type */
    UDZE = 0,               /* Zeroes */
    UDRW,                   /* RAW type */
    UDIG,                   /* Ignore */
    UDCO = 0x80000004,
    UDZO,
    UDBZ,
    ULFO,
    UDCM = 0x7ffffffe,      /* Comments */
    UDLE = 0xffffffff       /* Last Entry */
};
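
/*
 * These values correspond to the entry types of a UDIF "blkx" resource.
 * UDZO (zlib) is the common case in practice. UDCO (apparently ADC
 * compression) is given a value here but is deliberately not listed in
 * dmg_is_known_block_type() below, so such chunks are skipped with a
 * warning rather than decoded.
 */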

static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

static int GRAPH_RDLOCK
read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 8, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int GRAPH_RDLOCK
read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 4, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}
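
/*
 * Note that these helpers read big-endian values through a type-punning
 * cast, which assumes the host tolerates unaligned loads. The
 * "qemu/bswap.h" header included above also offers ldq_be_p()/ldl_be_p(),
 * which perform the same big-endian loads via safe byte copies, e.g.:
 *
 *   return ldq_be_p(buffer + offset);
 */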

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDZE: /* zero */
    case UDIG: /* ignore */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the beginning of the UDIF trailer (512 bytes). This magic can be
     * found in the last 511 bytes of the second-last sector or the first 4
     * bytes of the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, length, buffer, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}
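
/*
 * For reference, a sketch of the 512-byte UDIF ("koly") trailer fields
 * that dmg_open() reads below, as offsets from the start of the trailer
 * (field names follow the comments in dmg_open(); the remaining trailer
 * fields are not interpreted by this driver):
 *
 *   0x000  magic "koly"
 *   0x018  DataForkOffset (uint64, big-endian)
 *   0x028  RsrcForkOffset (uint64)
 *   0x030  RsrcForkLength (uint64)
 *   0x0d8  XMLOffset      (uint64)
 *   0x0e0  XMLLength      (uint64)
 *   0x1ec  image size in 512-byte sectors (uint64)
 */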

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDZE: /* zeros */
    case UDRW: /* uncompressed */
    case UDIG: /* ignore */
    case UDZO: /* zlib */
        return true;
    case UDBZ: /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO: /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}

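/*
 * Layout of a "mish" block as parsed below (byte offsets from the start
 * of the block, all fields big-endian; only the fields that are actually
 * read here are listed):
 *
 *   0x00   magic 0x6d697368 ("mish")
 *   0x08   sector number that this block's chunks are relative to
 *   0x18   offset of the (compressed) data within the data fork
 *   0xcc   chunk entries (204 bytes in), 40 bytes each:
 *            +0x00  entry type (uint32, see the enum above)
 *            +0x08  sector number (uint64)
 *            +0x10  sector count (uint64)
 *            +0x18  offset in the data fork (uint64)
 *            +0x20  length in the data fork (uint64)
 */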
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to the beginning of the chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
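    /* new_size is sized for the uint64_t arrays (offsets, lengths, sectors,
     * sectorcounts); types[] stores uint32_t entries, hence new_size / 2. */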
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            switch (s->types[i]) {
            case UDBZ:
                warn_report_once("dmg-bzip2 module is missing, accessing bzip2 "
                                 "compressed blocks will result in I/O errors");
                break;
            case ULFO:
                warn_report_once("dmg-lzfse module is missing, accessing lzfse "
                                 "compressed blocks will result in I/O errors");
                break;
            case UDCM:
            case UDLE:
                /* Comments and last entry can be ignored without problems */
                break;
            default:
                warn_report_once("Image contains chunks of unknown type %x, "
                                 "accessing them will result in I/O errors",
                                 s->types[i]);
                break;
            }
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sectors (types UDZE and UDIG) do not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

static int GRAPH_RDLOCK
dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                       uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from the beginning of the resource fork (info_begin) to
     * the resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* beginning of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, count, buffer, 0);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

static int GRAPH_RDLOCK
dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                   uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had an XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0);
    if (ret < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
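
/*
 * For illustration, the XML property list scanned above roughly has the
 * following shape, with one base64-encoded mish block per <data> element
 * (structure abridged; the surrounding plist keys are not interpreted,
 * only the <data> elements are extracted):
 *
 *   <plist version="1.0">
 *     <dict>
 *       <key>resource-fork</key>
 *       <dict>
 *         <key>blkx</key>
 *         <array>
 *           <dict>
 *             <key>Data</key>
 *             <data>bWlzaAAAAAE...</data>
 *           </dict>
 *         </array>
 *       </dict>
 *     </dict>
 *   </plist>
 */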

static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    GLOBAL_STATE_CODE();

    bdrv_graph_rdlock_main_loop();
    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    bdrv_graph_rdunlock_main_loop();
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /*
     * NB: if uncompress submodules are absent,
     * i.e. block_module_load return value == 0, the function pointers
     * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL.
     */
    if (block_module_load("dmg-bz2", errp) < 0) {
        return -EINVAL;
    }
    if (block_module_load("dmg-lzfse", errp) < 0) {
        return -EINVAL;
    }

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

static inline int is_sector_in_chunk(BDRVDMGState *s,
                uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 <= chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            if (chunk3 == 0) {
                goto err;
            }
            chunk2 = chunk3 - 1;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3 + 1;
        }
    }
err:
    return s->n_chunks; /* error */
}
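
/*
 * Worked example: for chunks with (sector, sectorcount) pairs (0, 100),
 * (100, 100) and (200, 100), search_chunk(s, 150) probes chunk 1 first,
 * sees 100 <= 150 < 200 and returns 1. A sector that no chunk covers
 * falls out of the loop and yields s->n_chunks, which dmg_read_chunk()
 * below treats as an error.
 */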

static int coroutine_fn GRAPH_RDLOCK
dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * decompressed. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                     (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO:
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * decompressed. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                       (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->uncompressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_co_preadv, it is treated specially. No buffer needs to
             * be pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

static int coroutine_fn GRAPH_RDLOCK
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
    .is_format      = true,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);
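
/*
 * Example usage: the driver is read-only, so dmg images are typically
 * inspected or converted to a writable format, e.g. (illustrative
 * command lines):
 *
 *   qemu-img info input.dmg
 *   qemu-img convert -f dmg -O qcow2 input.dmg output.qcow2
 */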