/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "dmg.h"

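/* Decompression hooks for the optional bzip2 and lzfse support. These
 * pointers are filled in by the "dmg-bz2" and "dmg-lzfse" block modules
 * (loaded in dmg_open below); if a module is unavailable the pointer stays
 * NULL and chunks of that type are treated as unsupported. */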
int (*dmg_uncompress_bz2)(char *next_in, unsigned int avail_in,
                          char *next_out, unsigned int avail_out);

int (*dmg_uncompress_lzfse)(char *next_in, unsigned int avail_in,
                            char *next_out, unsigned int avail_out);

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncation when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

enum {
    /* DMG block types */
    UDZE = 0,          /* zeroes */
    UDRW,              /* raw (uncompressed) */
    UDIG,              /* ignore */
    UDCO = 0x80000004, /* not handled by this driver */
    UDZO,              /* zlib compressed */
    UDBZ,              /* bzip2 compressed */
    ULFO,              /* lzfse compressed */
    UDCM = 0x7ffffffe, /* comment */
    UDLE = 0xffffffff  /* last entry */
};

static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

static int read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 8);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, &buffer, 4);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDZE: /* zero */
    case UDIG: /* ignore */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sector is not copied from a large buffer; a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up.
     * Since dmg images can have odd sizes, look for the "koly" magic which
     * marks the beginning of the UDIF trailer (512 bytes). This magic can be
     * found in the last 511 bytes of the second-to-last sector or the first
     * 4 bytes of the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, buffer, length);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDZE: /* zeros */
    case UDRW: /* uncompressed */
    case UDIG: /* ignore */
    case UDZO: /* zlib */
        return true;
    case UDBZ: /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO: /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}

static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) { /* magic is "mish" in ASCII */
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to the beginning of the chunk entries */
    offset += 204;

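    /* What follows is an array of 40-byte chunk entries. Field offsets are
     * inferred from the reads in the loop below; as a reference sketch (not
     * an official header), each big-endian entry looks like:
     *
     *   struct {
     *       uint32_t type;     at 0x00: block type (UDZE, UDRW, UDZO, ...)
     *       uint32_t comment;  at 0x04: (unused here)
     *       uint64_t sector;   at 0x08: first sector of the chunk
     *       uint64_t count;    at 0x10: number of 512-byte sectors
     *       uint64_t offset;   at 0x18: offset into the (compressed) data fork
     *       uint64_t length;   at 0x20: length in the (compressed) data fork
     *   };                     total: 40 bytes
     */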
    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2); /* entry types are 32-bit */
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sectors (types UDZE and UDIG) do not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

static int dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                                  uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from the beginning of the resource fork (info_begin) to
     * the resource data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* beginning of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, buffer, count);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

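/* Some DMG images carry the block map only as an XML property list instead
 * of a resource fork: the "mish" blocks are stored base64-encoded inside
 * <data> elements, which is what the scanner below unpacks before handing
 * each decoded blob to dmg_read_mish_block. */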
static int dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                              uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had an XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, buffer, info_length);
    if (ret != info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. The data is 284 (0x11c) bytes after base64
     * decode. The actual data element has 431 (0x1af) bytes which include
     * tabs and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

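/* For reference: the byte offsets dmg_open reads from the 512-byte "koly"
 * UDIF trailer located by dmg_find_koly_offset (all fields big-endian; a
 * summary of the reads below, not an exhaustive layout):
 *
 *   0x18  DataForkOffset  start of the (compressed) data fork
 *   0x28  RsrcForkOffset  start of the resource fork, if present
 *   0x30  RsrcForkLength
 *   0xd8  XMLOffset       start of the XML property list, if present
 *   0xe0  XMLLength
 *   0x1ec                 virtual disk size in 512-byte sectors
 */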
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    if (ret < 0) {
        return ret;
    }

    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_of_bds,
                               BDRV_CHILD_IMAGE, false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    block_module_load_one("dmg-bz2");
    block_module_load_one("dmg-lzfse");

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

static inline int is_sector_in_chunk(BDRVDMGState *s,
                                     uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 <= chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            if (chunk3 == 0) {
                goto err;
            }
            chunk2 = chunk3 - 1;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3 + 1;
        }
    }
err:
    return s->n_chunks; /* error */
}

static inline int dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                     (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO: /* lzfse compressed */
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->compressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                       (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_pread(bs->file, s->offsets[chunk],
                             s->uncompressed_chunk, s->lengths[chunk]);
            if (ret != s->lengths[chunk]) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_read, it is treated specially. No buffer needs to be
             * pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

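/* Reads take s->lock: the zlib stream and the one-chunk decompression cache
 * (s->current_chunk plus the chunk buffers) are shared across requests, so
 * coroutines must not touch them concurrently. */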
static int coroutine_fn
dmg_co_preadv(BlockDriverState *bs, uint64_t offset, uint64_t bytes,
              QEMUIOVector *qiov, int flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy as
         * s->uncompressed_chunk may be too small to cover the large all-zeroes
         * section. dmg_read_chunk is called to find s->current_chunk */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

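/* No write callbacks are registered below: this driver only implements
 * reads, which is why dmg_open calls bdrv_apply_auto_read_only. */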
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
    .is_format      = true,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);
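
/*
 * Example usage (the image names are hypothetical): with this driver built
 * in, qemu-img can convert an Apple disk image to a raw file for inspection
 * or for attaching to a guest:
 *
 *   qemu-img convert -f dmg -O raw example.dmg example.raw
 *
 * bzip2- and lzfse-compressed chunks additionally require QEMU to have been
 * built with the dmg-bz2 / dmg-lzfse modules.
 */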