blob: ef35a505f2662049fb2b32007fd6dcb313ddff9a [file] [log] [blame]
bellard585d0ed2004-12-12 11:24:44 +00001/*
2 * QEMU Block driver for DMG images
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard585d0ed2004-12-12 11:24:44 +00004 * Copyright (c) 2004 Johannes E. Schindelin
ths5fafdf22007-09-16 21:08:06 +00005 *
bellard585d0ed2004-12-12 11:24:44 +00006 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
Peter Maydell80c71a22016-01-18 18:01:42 +000024#include "qemu/osdep.h"
Markus Armbrusterda34e652016-03-14 09:01:28 +010025#include "qapi/error.h"
Paolo Bonzini737e1502012-12-17 18:19:44 +010026#include "block/block_int.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010027#include "qemu/bswap.h"
Markus Armbrusterd49b6832015-03-17 18:29:20 +010028#include "qemu/error-report.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010029#include "qemu/module.h"
Fam Zheng27685a82016-09-05 10:50:45 +080030#include "dmg.h"
31
/* Optional decompression hooks. These stay NULL until the corresponding
 * "dmg-bz2" / "dmg-lzfse" block module is loaded (see dmg_open);
 * dmg_is_known_block_type() treats the chunk types as unsupported while
 * the hook is NULL. Each hook decompresses one whole chunk and returns
 * a negative value on error. */
int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                          char *next_out, unsigned int avail_out);

int (*dmg_uncompress_lzfse)(char *next_in, unsigned int avail_in,
                            char *next_out, unsigned int avail_out);
enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512, /* same limit in sectors */
};

enum {
    /* DMG Block Type (the per-chunk entry type in a mish block) */
    UDZE = 0,           /* Zeroes */
    UDRW,               /* RAW type */
    UDIG,               /* Ignore */
    UDCO = 0x80000004,  /* not handled by this driver -- presumably ADC
                         * compressed; TODO confirm */
    UDZO,               /* zlib compressed */
    UDBZ,               /* bzip2 compressed (needs dmg-bz2 module) */
    ULFO,               /* lzfse compressed (needs dmg-lzfse module) */
    UDCM = 0x7ffffffe,  /* Comments */
    UDLE = 0xffffffff   /* Last Entry */
};
58
/* Format probe: score 2 when the filename carries a ".dmg" suffix,
 * 0 otherwise. The image content (@buf) is not inspected. */
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (filename == NULL) {
        return 0;
    }

    len = strlen(filename);
    /* require a non-empty basename in front of the ".dmg" extension */
    if (len > 4 && strcmp(filename + len - 4, ".dmg") == 0) {
        return 2;
    }
    return 0;
}
73
Kevin Wolf69d34a32013-01-25 17:07:30 +010074static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
bellard585d0ed2004-12-12 11:24:44 +000075{
Kevin Wolf69d34a32013-01-25 17:07:30 +010076 uint64_t buffer;
77 int ret;
78
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +020079 ret = bdrv_pread(bs->file, offset, &buffer, 8);
Kevin Wolf69d34a32013-01-25 17:07:30 +010080 if (ret < 0) {
81 return ret;
82 }
83
84 *result = be64_to_cpu(buffer);
85 return 0;
bellard585d0ed2004-12-12 11:24:44 +000086}
87
Kevin Wolf69d34a32013-01-25 17:07:30 +010088static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
bellard585d0ed2004-12-12 11:24:44 +000089{
Kevin Wolf69d34a32013-01-25 17:07:30 +010090 uint32_t buffer;
91 int ret;
92
Kevin Wolfcf2ab8f2016-06-20 18:24:02 +020093 ret = bdrv_pread(bs->file, offset, &buffer, 4);
Kevin Wolf69d34a32013-01-25 17:07:30 +010094 if (ret < 0) {
95 return ret;
96 }
97
98 *result = be32_to_cpu(buffer);
99 return 0;
bellard585d0ed2004-12-12 11:24:44 +0000100}
101
/* Read a big-endian 64-bit value from an in-memory buffer.
 *
 * Use memcpy instead of dereferencing a cast pointer: mish chunk entries
 * start at byte 204 + 40 * n, so the 64-bit fields read by the callers are
 * not 8-byte aligned, and the cast also violated strict-aliasing rules.
 * memcpy is safe on any alignment and compiles to a plain load where the
 * target allows unaligned access. */
static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    uint64_t val;

    memcpy(&val, buffer + offset, sizeof(val));
    return be64_to_cpu(val);
}
106
/* Read a big-endian 32-bit value from an in-memory buffer.
 *
 * memcpy avoids the misaligned access and strict-aliasing violation of
 * the previous *(uint32_t *) cast (chunk entry fields are not naturally
 * aligned within the mish block). */
static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    uint32_t val;

    memcpy(&val, buffer + offset, sizeof(val));
    return be32_to_cpu(val);
}
111
Stefan Hajnoczif0dce232014-03-26 13:06:00 +0100112/* Increase max chunk sizes, if necessary. This function is used to calculate
113 * the buffer sizes needed for compressed/uncompressed chunk I/O.
114 */
115static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
116 uint32_t *max_compressed_size,
117 uint32_t *max_sectors_per_chunk)
118{
119 uint32_t compressed_size = 0;
120 uint32_t uncompressed_sectors = 0;
121
122 switch (s->types[chunk]) {
Julio Faracco95a156f2018-11-05 13:08:06 -0200123 case UDZO: /* zlib compressed */
124 case UDBZ: /* bzip2 compressed */
125 case ULFO: /* lzfse compressed */
Stefan Hajnoczif0dce232014-03-26 13:06:00 +0100126 compressed_size = s->lengths[chunk];
127 uncompressed_sectors = s->sectorcounts[chunk];
128 break;
Julio Faracco95a156f2018-11-05 13:08:06 -0200129 case UDRW: /* copy */
Marc-André Lureau6fb00222017-06-22 13:04:16 +0200130 uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
Stefan Hajnoczif0dce232014-03-26 13:06:00 +0100131 break;
yuchenlin39a04082019-01-03 19:47:00 +0800132 case UDZE: /* zero */
133 case UDIG: /* ignore */
Peter Wu177b7512015-01-06 18:48:15 +0100134 /* as the all-zeroes block may be large, it is treated specially: the
135 * sector is not copied from a large buffer, a simple memset is used
136 * instead. Therefore uncompressed_sectors does not need to be set. */
Stefan Hajnoczif0dce232014-03-26 13:06:00 +0100137 break;
138 }
139
140 if (compressed_size > *max_compressed_size) {
141 *max_compressed_size = compressed_size;
142 }
143 if (uncompressed_sectors > *max_sectors_per_chunk) {
144 *max_sectors_per_chunk = uncompressed_sectors;
145 }
146}
147
/* Locate the 512-byte UDIF trailer ("koly" block) near the end of the image.
 *
 * Returns the byte offset of the trailer on success, or a negative errno
 * (with @errp set) when the file size cannot be read, the file is shorter
 * than one sector, or no "koly" magic is found in the search window. */
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    /* 515-byte window: see the search-space explanation below */
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        /* start the scan two sectors minus one byte before the end */
        offset = length - 511 - 512;
    }
    /* never read more than the 515-byte window (or the whole short file) */
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    /* byte-wise scan for the 4-byte "koly" magic inside the window */
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}
188
/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;   /* largest compressed chunk, in bytes */
    uint32_t max_sectors_per_chunk; /* largest uncompressed chunk, in sectors */
} DmgHeaderState;
198
Peter Wua8b10c62015-01-06 18:48:13 +0100199static bool dmg_is_known_block_type(uint32_t entry_type)
200{
201 switch (entry_type) {
yuchenlin39a04082019-01-03 19:47:00 +0800202 case UDZE: /* zeros */
Julio Faracco95a156f2018-11-05 13:08:06 -0200203 case UDRW: /* uncompressed */
yuchenlin39a04082019-01-03 19:47:00 +0800204 case UDIG: /* ignore */
Julio Faracco95a156f2018-11-05 13:08:06 -0200205 case UDZO: /* zlib */
Peter Wua8b10c62015-01-06 18:48:13 +0100206 return true;
Julio Faracco95a156f2018-11-05 13:08:06 -0200207 case UDBZ: /* bzip2 */
Fam Zheng27685a82016-09-05 10:50:45 +0800208 return !!dmg_uncompress_bz2;
Julio Faracco95a156f2018-11-05 13:08:06 -0200209 case ULFO: /* lzfse */
Julio Faracco7a40b412018-11-05 13:08:05 -0200210 return !!dmg_uncompress_lzfse;
Peter Wua8b10c62015-01-06 18:48:13 +0100211 default:
212 return false;
213 }
214}
215
/* Parse one "mish" block and append its chunk entries to the driver's
 * sector table (s->types/sectors/sectorcounts/offsets/lengths).
 *
 * @buffer holds @count bytes of raw block data; non-mish data is silently
 * skipped (returns 0). Unknown chunk types are dropped; per-chunk limits
 * (DMG_SECTORCOUNTS_MAX / DMG_LENGTHS_MAX) are enforced and yield -EINVAL.
 * ds->max_* running maxima are updated for later buffer allocation. */
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries */
    offset += 204;

    /* each chunk entry is 40 bytes */
    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    /* types are uint32_t, half the size of the uint64_t arrays below.
     * NOTE(review): new_size could in principle overflow for a huge number
     * of accumulated chunks -- bounded in practice by the fork length
     * checks in the callers; confirm if hardening is wanted. */
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            /* drop this entry: shrink the target range and re-use slot i */
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type UDZE and UDIG) does not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}
305
/* Walk the resource fork at [info_begin, info_begin + info_length) and feed
 * every contained resource to dmg_read_mish_block.
 *
 * Layout (all big-endian): a 4-byte offset to the resource data, a 4-byte
 * data length at +8, then a sequence of (4-byte size, payload) resources.
 * Returns 0 on success or a negative errno; all offset/length fields are
 * validated against the fork bounds before use. */
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        /* grow the scratch buffer to the current resource size and read it */
        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
372
/* Parse the XML property list at [info_begin, info_begin + info_length):
 * base64-decode every <data>...</data> element and feed the decoded bytes
 * to dmg_read_mish_block.
 *
 * The XML is not parsed with a real parser; a plain substring scan over
 * the NUL-terminated copy is used. Returns 0 on success, negative errno
 * on malformed input or read failure. */
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    /* +1 for the NUL terminator required by the strstr scan below */
    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;    /* skip past "<data>" */
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        /* terminate the base64 span in place for g_base64_decode */
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
426
/* Open a dmg image (read-only): locate the UDIF trailer, validate the
 * data-fork / resource-fork / plist offsets it describes, build the chunk
 * table from either the resource fork or the XML plist, and allocate the
 * per-chunk I/O buffers plus the zlib stream.
 *
 * Returns 0 on success or a negative errno; on failure every partially
 * built table/buffer is released. */
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    /* driver has no write path, so the image must be opened read-only */
    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    if (ret < 0) {
        return ret;
    }

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
                               BDRV_CHILD_IMAGE, false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    /* best-effort: populates dmg_uncompress_bz2 / dmg_uncompress_lzfse */
    block_module_load_one("dmg-bz2");
    block_module_load_one("dmg-lzfse");

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        /* forks must lie before the trailer */
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* SectorCount field of the trailer gives the virtual disk size */
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    /* prefer the resource fork; fall back to the XML plist */
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        /* neither fork present: nothing describes the chunks */
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    /* n_chunks is the "no chunk cached" sentinel for dmg_read_chunk */
    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}
556
/* Advertise that this driver only performs whole-sector reads. */
static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
561
shiliyang5f14f312020-10-30 11:35:12 +0800562static inline int is_sector_in_chunk(BDRVDMGState *s,
Stefan Hajnoczi686d7142014-03-26 13:05:59 +0100563 uint32_t chunk_num, uint64_t sector_num)
bellard585d0ed2004-12-12 11:24:44 +0000564{
Stefan Hajnoczi2c1885a2014-03-26 13:05:54 +0100565 if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
566 s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
567 return 0;
568 } else {
569 return -1;
570 }
bellard585d0ed2004-12-12 11:24:44 +0000571}
572
Stefan Hajnoczi686d7142014-03-26 13:05:59 +0100573static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
bellard585d0ed2004-12-12 11:24:44 +0000574{
575 /* binary search */
Stefan Hajnoczi2c1885a2014-03-26 13:05:54 +0100576 uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
yuchenlin5ef40822019-01-03 19:46:58 +0800577 while (chunk1 <= chunk2) {
Stefan Hajnoczi2c1885a2014-03-26 13:05:54 +0100578 chunk3 = (chunk1 + chunk2) / 2;
579 if (s->sectors[chunk3] > sector_num) {
yuchenlin5ef40822019-01-03 19:46:58 +0800580 if (chunk3 == 0) {
581 goto err;
582 }
583 chunk2 = chunk3 - 1;
Stefan Hajnoczi2c1885a2014-03-26 13:05:54 +0100584 } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
585 return chunk3;
586 } else {
yuchenlin5ef40822019-01-03 19:46:58 +0800587 chunk1 = chunk3 + 1;
Stefan Hajnoczi2c1885a2014-03-26 13:05:54 +0100588 }
bellard585d0ed2004-12-12 11:24:44 +0000589 }
yuchenlin5ef40822019-01-03 19:46:58 +0800590err:
bellard585d0ed2004-12-12 11:24:44 +0000591 return s->n_chunks; /* error */
592}
593
/* Ensure the chunk covering @sector_num is decoded into
 * s->uncompressed_chunk (except for all-zero chunk types, which are never
 * materialised) and record it as s->current_chunk.
 *
 * Returns 0 on success, -1 (or a negative errno from a decompressor) on
 * failure. While decoding, current_chunk is set to the n_chunks sentinel
 * so a failed decode does not leave a stale chunk marked valid. */
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            /* sector not covered by any chunk */
            return -1;
        }

        /* invalidate the cache until the new chunk is fully decoded */
        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            /* the output must decode to exactly sectorcount * 512 bytes */
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            /* module missing: chunk was filtered out at open time by
             * dmg_is_known_block_type, so this break is a safety net */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO: /* lzfse compressed */
            /* module missing: same safety net as the bzip2 case above */
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                           (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}
690
/* Coroutine read entry point: serve @bytes at @offset sector by sector.
 *
 * Each sector is either memset to zero (all-zero chunk types) or copied
 * out of the cached uncompressed chunk filled by dmg_read_chunk. Request
 * alignment to 512-byte sectors is guaranteed by dmg_refresh_limits and
 * asserted below. Returns 0 or -EIO. */
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    /* the chunk cache (current_chunk + buffers) is shared mutable state */
    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
731
bellard585d0ed2004-12-12 11:24:44 +0000732static void dmg_close(BlockDriverState *bs)
733{
734 BDRVDMGState *s = bs->opaque;
Kevin Wolf4f8aa2e2013-01-25 17:07:31 +0100735
736 g_free(s->types);
737 g_free(s->offsets);
738 g_free(s->lengths);
739 g_free(s->sectors);
740 g_free(s->sectorcounts);
Kevin Wolfb546a942014-05-20 13:28:14 +0200741 qemu_vfree(s->compressed_chunk);
742 qemu_vfree(s->uncompressed_chunk);
Kevin Wolf4f8aa2e2013-01-25 17:07:31 +0100743
bellard585d0ed2004-12-12 11:24:44 +0000744 inflateEnd(&s->zstream);
745}
746
/* Driver registration. Note there is no write callback: the format is
 * exposed read-only (enforced via bdrv_apply_auto_read_only in dmg_open). */
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
    .is_format      = true,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);