blob: a7d25fc47b46fb31dc7edaceaf5f1f81c91da3ed [file] [log] [blame]
bellard585d0ed2004-12-12 11:24:44 +00001/*
2 * QEMU Block driver for DMG images
ths5fafdf22007-09-16 21:08:06 +00003 *
bellard585d0ed2004-12-12 11:24:44 +00004 * Copyright (c) 2004 Johannes E. Schindelin
ths5fafdf22007-09-16 21:08:06 +00005 *
bellard585d0ed2004-12-12 11:24:44 +00006 * Permission is hereby granted, free of charge, to any person obtaining a copy
7 * of this software and associated documentation files (the "Software"), to deal
8 * in the Software without restriction, including without limitation the rights
9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
10 * copies of the Software, and to permit persons to whom the Software is
11 * furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
22 * THE SOFTWARE.
23 */
Peter Maydell80c71a22016-01-18 18:01:42 +000024#include "qemu/osdep.h"
Markus Armbrusterda34e652016-03-14 09:01:28 +010025#include "qapi/error.h"
pbrookfaf07962007-11-11 02:51:17 +000026#include "qemu-common.h"
Paolo Bonzini737e1502012-12-17 18:19:44 +010027#include "block/block_int.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010028#include "qemu/bswap.h"
Markus Armbrusterd49b6832015-03-17 18:29:20 +010029#include "qemu/error-report.h"
Paolo Bonzini1de7afc2012-12-17 18:20:00 +010030#include "qemu/module.h"
Fam Zheng27685a82016-09-05 10:50:45 +080031#include "dmg.h"
32
/* Optional bzip2 decompression hook. NULL until the "dmg-bz2" block module
 * is loaded (see block_module_load_one() in dmg_open); when absent, bzip2
 * chunks are treated as unsupported (see dmg_is_known_block_type). */
int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                          char *next_out, unsigned int avail_out);
bellard585d0ed2004-12-12 11:24:44 +000035
enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncating when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    /* Derived cap on sectors per chunk: 64 MB worth of 512-byte sectors */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};
43
/* Format probe: dmg images have no magic header, so score purely on the
 * ".dmg" filename suffix. Returns 2 for a match, 0 otherwise. */
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    size_t name_len;

    if (filename == NULL) {
        return 0;
    }

    name_len = strlen(filename);
    if (name_len <= 4) {
        return 0;
    }

    /* compare the last four characters against ".dmg" */
    return strcmp(filename + name_len - 4, ".dmg") == 0 ? 2 : 0;
}
58
/* Read a big-endian 64-bit value at @offset in bs->file and store the
 * host-endian result in *result.
 * Returns 0 on success, or the negative error code from bdrv_pread. */
static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}
72
/* Read a big-endian 32-bit value at @offset in bs->file and store the
 * host-endian result in *result.
 * Returns 0 on success, or the negative error code from bdrv_pread. */
static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}
86
/* Read a big-endian 64-bit value from an in-memory buffer at @offset.
 *
 * Use memcpy instead of dereferencing a cast pointer: buffer + offset has
 * no alignment guarantee, and accessing a uint8_t array through a uint64_t
 * lvalue violates strict aliasing — both are undefined behavior. memcpy
 * compiles to the same single load on targets that allow unaligned access. */
static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    uint64_t value;

    memcpy(&value, buffer + offset, sizeof(value));
    return be64_to_cpu(value);
}
91
/* Read a big-endian 32-bit value from an in-memory buffer at @offset.
 *
 * memcpy avoids the misaligned access and strict-aliasing violation of
 * casting &buffer[offset] to uint32_t * (undefined behavior). */
static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    uint32_t value;

    memcpy(&value, buffer + offset, sizeof(value));
    return be32_to_cpu(value);
}
96
Stefan Hajnoczif0dce232014-03-26 13:06:00 +010097/* Increase max chunk sizes, if necessary. This function is used to calculate
98 * the buffer sizes needed for compressed/uncompressed chunk I/O.
99 */
100static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
101 uint32_t *max_compressed_size,
102 uint32_t *max_sectors_per_chunk)
103{
104 uint32_t compressed_size = 0;
105 uint32_t uncompressed_sectors = 0;
106
107 switch (s->types[chunk]) {
108 case 0x80000005: /* zlib compressed */
Peter Wu6b383c02015-01-06 18:48:14 +0100109 case 0x80000006: /* bzip2 compressed */
Stefan Hajnoczif0dce232014-03-26 13:06:00 +0100110 compressed_size = s->lengths[chunk];
111 uncompressed_sectors = s->sectorcounts[chunk];
112 break;
113 case 1: /* copy */
114 uncompressed_sectors = (s->lengths[chunk] + 511) / 512;
115 break;
116 case 2: /* zero */
Peter Wu177b7512015-01-06 18:48:15 +0100117 /* as the all-zeroes block may be large, it is treated specially: the
118 * sector is not copied from a large buffer, a simple memset is used
119 * instead. Therefore uncompressed_sectors does not need to be set. */
Stefan Hajnoczif0dce232014-03-26 13:06:00 +0100120 break;
121 }
122
123 if (compressed_size > *max_compressed_size) {
124 *max_compressed_size = compressed_size;
125 }
126 if (uncompressed_sectors > *max_sectors_per_chunk) {
127 *max_sectors_per_chunk = uncompressed_sectors;
128 }
129}
130
/* Locate the "koly" UDIF trailer in @file and return its byte offset,
 * or a negative error code (setting *errp) if it cannot be found. */
static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];    /* search window: 511 + 4 bytes, see below */
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the begin of the UDIF trailer (512 bytes). This magic can be found
     * in the last 511 bytes of the second-last sector or the first 4 bytes of
     * the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
            "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        /* the trailer alone is 512 bytes, so anything smaller is not a dmg */
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    /* start the search 511 + 512 bytes before the (rounded-up) end */
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    /* clamp the window to the file size for small images */
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    /* scan the window byte-by-byte for the 4-byte magic */
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}
171
/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open: running maxima over all parsed chunks, used to
     * size the compressed/uncompressed I/O buffers (see dmg_open) */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;
181
Peter Wua8b10c62015-01-06 18:48:13 +0100182static bool dmg_is_known_block_type(uint32_t entry_type)
183{
184 switch (entry_type) {
185 case 0x00000001: /* uncompressed */
186 case 0x00000002: /* zeroes */
187 case 0x80000005: /* zlib */
188 return true;
Fam Zheng27685a82016-09-05 10:50:45 +0800189 case 0x80000006: /* bzip2 */
190 return !!dmg_uncompress_bz2;
Peter Wua8b10c62015-01-06 18:48:13 +0100191 default:
192 return false;
193 }
194}
195
/* Parse one "mish" block (a BLKX resource) from @buffer of @count bytes and
 * append its chunk entries to the tables in @s (types/offsets/lengths/
 * sectors/sectorcounts, all grown in lockstep).
 *
 * @ds carries the data fork offset in and the running buffer-size maxima out.
 * Returns 0 on success (including silently skipping non-mish data) or a
 * negative error code on oversized chunk entries. */
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to begin of chunk entries (fixed 204-byte mish header) */
    offset += 204;

    /* each chunk entry is 40 bytes */
    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    /* types are uint32_t, half the size of the uint64_t fields */
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            /* unknown entry type: drop this slot and re-read the next entry
             * into the same index (i-- compensates the loop increment) */
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (type 2) does not need to be "uncompressed" and can
         * therefore be unbounded. */
        if (s->types[i] != 2 && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    /* NOTE(review): the tables keep their grown size on failure; the caller
     * (dmg_open) frees them on its own fail path */
    return ret;
}
284
/* Walk the resource fork at [info_begin, info_begin + info_length) and feed
 * every resource payload to dmg_read_mish_block to build the sector table.
 * Returns 0 on success or a negative error code. */
static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from begin of resource fork (info_begin) to resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        /* offset must stay inside the fork */
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* begin of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            /* resource must be non-empty and fit in the remaining data */
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        /* read the whole resource into memory for dmg_read_mish_block */
        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
351
/* Parse the XML property list at [info_begin, info_begin + info_length):
 * each <data>...</data> element holds a base64-encoded mish block which is
 * decoded and fed to dmg_read_mish_block.
 * Returns 0 on success or a negative error code. */
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had a XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    /* NUL-terminate so the buffer can be scanned with strstr */
    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
    /* NOTE(review): int vs uint64_t comparison; safe only because
     * info_length is capped at 16 MiB above — confirm if the cap changes */
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which includes tabs
     * and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;    /* skip over "<data>" */
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        /* terminate the base64 payload in place for g_base64_decode */
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}
405
/* Open a dmg image read-only: locate the UDIF ("koly") trailer, read the
 * fork offsets it describes, build the chunk tables from either the resource
 * fork or the XML plist, and allocate the (de)compression buffers.
 * Returns 0 on success or a negative error code (setting *errp). */
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    /* best-effort load of the optional bzip2 decompression module; it sets
     * dmg_uncompress_bz2 when present */
    block_module_load_one("dmg-bz2");
    bs->read_only = true;

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    /* start at 1 so the buffer allocations below are never zero-sized */
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        /* the data fork cannot start past the trailer */
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    /* the resource fork must lie entirely before the trailer */
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    /* the XML plist must also lie entirely before the trailer */
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* SectorCount field of the trailer */
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    /* prefer the resource fork; fall back to the XML plist */
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        /* neither fork present: not a usable dmg */
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    /* n_chunks is an invalid index, i.e. "no chunk cached yet" */
    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}
530
/* Advertise the driver's I/O limits: dmg_co_preadv works on whole sectors. */
static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}
535
bellard585d0ed2004-12-12 11:24:44 +0000536static inline int is_sector_in_chunk(BDRVDMGState* s,
Stefan Hajnoczi686d7142014-03-26 13:05:59 +0100537 uint32_t chunk_num, uint64_t sector_num)
bellard585d0ed2004-12-12 11:24:44 +0000538{
Stefan Hajnoczi2c1885a2014-03-26 13:05:54 +0100539 if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
540 s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
541 return 0;
542 } else {
543 return -1;
544 }
bellard585d0ed2004-12-12 11:24:44 +0000545}
546
Stefan Hajnoczi686d7142014-03-26 13:05:59 +0100547static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
bellard585d0ed2004-12-12 11:24:44 +0000548{
549 /* binary search */
Stefan Hajnoczi2c1885a2014-03-26 13:05:54 +0100550 uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
551 while (chunk1 != chunk2) {
552 chunk3 = (chunk1 + chunk2) / 2;
553 if (s->sectors[chunk3] > sector_num) {
554 chunk2 = chunk3;
555 } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
556 return chunk3;
557 } else {
558 chunk1 = chunk3;
559 }
bellard585d0ed2004-12-12 11:24:44 +0000560 }
561 return s->n_chunks; /* error */
562}
563
/* Ensure the chunk containing @sector_num is decoded into
 * s->uncompressed_chunk (except type-2 zero chunks, which are never
 * materialized) and record it as s->current_chunk.
 * Returns 0 on success, -1 (or a negative bz2 error) on failure. */
static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    /* fast path: the cached chunk already covers the sector */
    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        /* invalidate the cache while the buffer is being overwritten */
        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case 0x80000005: { /* zlib compressed */
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            /* the chunk must decompress to exactly its sector count */
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case 0x80000006: /* bzip2 compressed */
            /* NOTE(review): if the dmg-bz2 module is absent this breaks out
             * and marks the chunk current without filling the buffer —
             * presumably unreachable because dmg_is_known_block_type rejects
             * bzip2 chunks at open time in that case; confirm */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case 1: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case 2: /* zero */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}
638
/* Coroutine read entry point: serve a sector-aligned read of @bytes at
 * @offset by decoding one chunk at a time under s->lock.
 * Returns 0 on success or -EIO when a chunk cannot be decoded. */
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    /* request_alignment (see dmg_refresh_limits) guarantees both hold */
    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    /* serialize access to the shared chunk cache and zlib state */
    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == 2) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        /* copy the one sector out of the cached uncompressed chunk */
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}
678
bellard585d0ed2004-12-12 11:24:44 +0000679static void dmg_close(BlockDriverState *bs)
680{
681 BDRVDMGState *s = bs->opaque;
Kevin Wolf4f8aa2e2013-01-25 17:07:31 +0100682
683 g_free(s->types);
684 g_free(s->offsets);
685 g_free(s->lengths);
686 g_free(s->sectors);
687 g_free(s->sectorcounts);
Kevin Wolfb546a942014-05-20 13:28:14 +0200688 qemu_vfree(s->compressed_chunk);
689 qemu_vfree(s->uncompressed_chunk);
Kevin Wolf4f8aa2e2013-01-25 17:07:31 +0100690
bellard585d0ed2004-12-12 11:24:44 +0000691 inflateEnd(&s->zstream);
692}
693
/* Driver registration table: dmg is a read-only format driver (no write
 * callbacks are provided). */
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_format_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
};
Anthony Liguori5efa9d52009-05-09 17:03:42 -0500704
/* Register the dmg driver with the block layer at module-init time. */
static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);