/*
 * QEMU Block driver for DMG images
 *
 * Copyright (c) 2004 Johannes E. Schindelin
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "block/block-io.h"
#include "block/block_int.h"
#include "qemu/bswap.h"
#include "qemu/error-report.h"
#include "qemu/module.h"
#include "qemu/memalign.h"
#include "dmg.h"

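/*
 * Decompression hooks filled in by the optional dmg-bz2 and dmg-lzfse
 * modules; they remain NULL when those modules are not loaded (see dmg_open).
 */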
BdrvDmgUncompressFunc *dmg_uncompress_bz2;
BdrvDmgUncompressFunc *dmg_uncompress_lzfse;

enum {
    /* Limit chunk sizes to prevent unreasonable amounts of memory being used
     * or truncation when converting to 32-bit types
     */
    DMG_LENGTHS_MAX = 64 * 1024 * 1024, /* 64 MB */
    DMG_SECTORCOUNTS_MAX = DMG_LENGTHS_MAX / 512,
};

enum {
    /* DMG Block Type */
    UDZE = 0, /* Zeroes */
    UDRW,     /* RAW type */
    UDIG,     /* Ignore */
    UDCO = 0x80000004,
    UDZO,
    UDBZ,
    ULFO,
    UDCM = 0x7ffffffe, /* Comments */
    UDLE = 0xffffffff  /* Last Entry */
};

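/*
 * The DMG format has no reliable magic at a fixed offset near the start of
 * the file, so probing falls back to the filename: a ".dmg" suffix yields a
 * low score of 2, anything else 0.
 */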
static int dmg_probe(const uint8_t *buf, int buf_size, const char *filename)
{
    int len;

    if (!filename) {
        return 0;
    }

    len = strlen(filename);
    if (len > 4 && !strcmp(filename + len - 4, ".dmg")) {
        return 2;
    }
    return 0;
}

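/*
 * All on-disk DMG metadata is big-endian. The following helpers read a
 * single integer either from the underlying file (read_uint64/read_uint32)
 * or from an in-memory buffer (buff_read_uint64/buff_read_uint32).
 */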
static int GRAPH_RDLOCK
read_uint64(BlockDriverState *bs, int64_t offset, uint64_t *result)
{
    uint64_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 8, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be64_to_cpu(buffer);
    return 0;
}

static int GRAPH_RDLOCK
read_uint32(BlockDriverState *bs, int64_t offset, uint32_t *result)
{
    uint32_t buffer;
    int ret;

    ret = bdrv_pread(bs->file, offset, 4, &buffer, 0);
    if (ret < 0) {
        return ret;
    }

    *result = be32_to_cpu(buffer);
    return 0;
}

static inline uint64_t buff_read_uint64(const uint8_t *buffer, int64_t offset)
{
    return be64_to_cpu(*(uint64_t *)&buffer[offset]);
}

static inline uint32_t buff_read_uint32(const uint8_t *buffer, int64_t offset)
{
    return be32_to_cpu(*(uint32_t *)&buffer[offset]);
}

/* Increase max chunk sizes, if necessary. This function is used to calculate
 * the buffer sizes needed for compressed/uncompressed chunk I/O.
 */
static void update_max_chunk_size(BDRVDMGState *s, uint32_t chunk,
                                  uint32_t *max_compressed_size,
                                  uint32_t *max_sectors_per_chunk)
{
    uint32_t compressed_size = 0;
    uint32_t uncompressed_sectors = 0;

    switch (s->types[chunk]) {
    case UDZO: /* zlib compressed */
    case UDBZ: /* bzip2 compressed */
    case ULFO: /* lzfse compressed */
        compressed_size = s->lengths[chunk];
        uncompressed_sectors = s->sectorcounts[chunk];
        break;
    case UDRW: /* copy */
        uncompressed_sectors = DIV_ROUND_UP(s->lengths[chunk], 512);
        break;
    case UDZE: /* zero */
    case UDIG: /* ignore */
        /* as the all-zeroes block may be large, it is treated specially: the
         * sectors are not copied from a large buffer, a simple memset is used
         * instead. Therefore uncompressed_sectors does not need to be set. */
        break;
    }

    if (compressed_size > *max_compressed_size) {
        *max_compressed_size = compressed_size;
    }
    if (uncompressed_sectors > *max_sectors_per_chunk) {
        *max_sectors_per_chunk = uncompressed_sectors;
    }
}

static int64_t dmg_find_koly_offset(BdrvChild *file, Error **errp)
{
    BlockDriverState *file_bs = file->bs;
    int64_t length;
    int64_t offset = 0;
    uint8_t buffer[515];
    int i, ret;

    /* bdrv_getlength returns a multiple of block size (512), rounded up. Since
     * dmg images can have odd sizes, try to look for the "koly" magic which
     * marks the beginning of the UDIF trailer (512 bytes). This magic can be
     * found in the last 511 bytes of the second-last sector or the first 4
     * bytes of the last sector (search space: 515 bytes) */
    length = bdrv_getlength(file_bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Failed to get file size while reading UDIF trailer");
        return length;
    } else if (length < 512) {
        error_setg(errp, "dmg file must be at least 512 bytes long");
        return -EINVAL;
    }
    if (length > 511 + 512) {
        offset = length - 511 - 512;
    }
    length = length < 515 ? length : 515;
    ret = bdrv_pread(file, offset, length, buffer, 0);
    if (ret < 0) {
        error_setg_errno(errp, -ret, "Failed while reading UDIF trailer");
        return ret;
    }
    for (i = 0; i < length - 3; i++) {
        if (buffer[i] == 'k' && buffer[i+1] == 'o' &&
            buffer[i+2] == 'l' && buffer[i+3] == 'y') {
            return offset + i;
        }
    }
    error_setg(errp, "Could not locate UDIF trailer in dmg file");
    return -EINVAL;
}

/* used when building the sector table */
typedef struct DmgHeaderState {
    /* used internally by dmg_read_mish_block to remember offsets of blocks
     * across calls */
    uint64_t data_fork_offset;
    /* exported for dmg_open */
    uint32_t max_compressed_size;
    uint32_t max_sectors_per_chunk;
} DmgHeaderState;

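/*
 * A block type is "known" if this driver can produce its data: zero, raw and
 * zlib chunks are always supported, while bzip2 and lzfse chunks additionally
 * require the optional dmg-bz2/dmg-lzfse modules to be loaded.
 */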
static bool dmg_is_known_block_type(uint32_t entry_type)
{
    switch (entry_type) {
    case UDZE: /* zeros */
    case UDRW: /* uncompressed */
    case UDIG: /* ignore */
    case UDZO: /* zlib */
        return true;
    case UDBZ: /* bzip2 */
        return !!dmg_uncompress_bz2;
    case ULFO: /* lzfse */
        return !!dmg_uncompress_lzfse;
    default:
        return false;
    }
}

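/*
 * Parse one "mish" block (magic 0x6d697368) and append its chunk entries to
 * the state tables. As consumed below, a mish block is a 204-byte header
 * containing the first output sector (offset 0x8) and the byte offset of the
 * blob within the data fork (offset 0x18), followed by 40-byte chunk entries:
 * a 32-bit type, 4 bytes unused by this parser, then the 64-bit sector
 * number, sector count, compressed offset and compressed length.
 */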
static int dmg_read_mish_block(BDRVDMGState *s, DmgHeaderState *ds,
                               uint8_t *buffer, uint32_t count)
{
    uint32_t type, i;
    int ret;
    size_t new_size;
    uint32_t chunk_count;
    int64_t offset = 0;
    uint64_t data_offset;
    uint64_t in_offset = ds->data_fork_offset;
    uint64_t out_offset;

    type = buff_read_uint32(buffer, offset);
    /* skip data that is not a valid MISH block (invalid magic or too small) */
    if (type != 0x6d697368 || count < 244) {
        /* assume success for now */
        return 0;
    }

    /* chunk offsets are relative to this sector number */
    out_offset = buff_read_uint64(buffer, offset + 8);

    /* location in data fork for (compressed) blob (in bytes) */
    data_offset = buff_read_uint64(buffer, offset + 0x18);
    in_offset += data_offset;

    /* move to beginning of chunk entries */
    offset += 204;

    chunk_count = (count - 204) / 40;
    new_size = sizeof(uint64_t) * (s->n_chunks + chunk_count);
    s->types = g_realloc(s->types, new_size / 2);
    s->offsets = g_realloc(s->offsets, new_size);
    s->lengths = g_realloc(s->lengths, new_size);
    s->sectors = g_realloc(s->sectors, new_size);
    s->sectorcounts = g_realloc(s->sectorcounts, new_size);

    for (i = s->n_chunks; i < s->n_chunks + chunk_count; i++) {
        s->types[i] = buff_read_uint32(buffer, offset);
        if (!dmg_is_known_block_type(s->types[i])) {
            switch (s->types[i]) {
            case UDBZ:
                warn_report_once("dmg-bzip2 module is missing, accessing bzip2 "
                                 "compressed blocks will result in I/O errors");
                break;
            case ULFO:
                warn_report_once("dmg-lzfse module is missing, accessing lzfse "
                                 "compressed blocks will result in I/O errors");
                break;
            case UDCM:
            case UDLE:
                /* Comments and last entry can be ignored without problems */
                break;
            default:
                warn_report_once("Image contains chunks of unknown type %x, "
                                 "accessing them will result in I/O errors",
                                 s->types[i]);
                break;
            }
            chunk_count--;
            i--;
            offset += 40;
            continue;
        }

        /* sector number */
        s->sectors[i] = buff_read_uint64(buffer, offset + 8);
        s->sectors[i] += out_offset;

        /* sector count */
        s->sectorcounts[i] = buff_read_uint64(buffer, offset + 0x10);

        /* all-zeroes sector (types UDZE and UDIG) does not need to be
         * "uncompressed" and can therefore be unbounded. */
        if (s->types[i] != UDZE && s->types[i] != UDIG
            && s->sectorcounts[i] > DMG_SECTORCOUNTS_MAX) {
            error_report("sector count %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->sectorcounts[i], i, DMG_SECTORCOUNTS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        /* offset in (compressed) data fork */
        s->offsets[i] = buff_read_uint64(buffer, offset + 0x18);
        s->offsets[i] += in_offset;

        /* length in (compressed) data fork */
        s->lengths[i] = buff_read_uint64(buffer, offset + 0x20);

        if (s->lengths[i] > DMG_LENGTHS_MAX) {
            error_report("length %" PRIu64 " for chunk %" PRIu32
                         " is larger than max (%u)",
                         s->lengths[i], i, DMG_LENGTHS_MAX);
            ret = -EINVAL;
            goto fail;
        }

        update_max_chunk_size(s, i, &ds->max_compressed_size,
                              &ds->max_sectors_per_chunk);
        offset += 40;
    }
    s->n_chunks += chunk_count;
    return 0;

fail:
    return ret;
}

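/*
 * Scan the binary resource fork for mish blocks. The resource data area
 * consists of one or more resources, each a 32-bit size prefix followed by a
 * payload that is handed to dmg_read_mish_block; a trailing resource map, if
 * present, is ignored.
 */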
static int GRAPH_RDLOCK
dmg_read_resource_fork(BlockDriverState *bs, DmgHeaderState *ds,
                       uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint32_t count, rsrc_data_offset;
    uint8_t *buffer = NULL;
    uint64_t info_end;
    uint64_t offset;

    /* read offset from beginning of resource fork (info_begin) to resource
     * data */
    ret = read_uint32(bs, info_begin, &rsrc_data_offset);
    if (ret < 0) {
        goto fail;
    } else if (rsrc_data_offset > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* read length of resource data */
    ret = read_uint32(bs, info_begin + 8, &count);
    if (ret < 0) {
        goto fail;
    } else if (count == 0 || rsrc_data_offset + count > info_length) {
        ret = -EINVAL;
        goto fail;
    }

    /* beginning of resource data (consisting of one or more resources) */
    offset = info_begin + rsrc_data_offset;

    /* end of resource data (there is possibly a following resource map
     * which will be ignored). */
    info_end = offset + count;

    /* read offsets (mish blocks) from one or more resources in resource data */
    while (offset < info_end) {
        /* size of following resource */
        ret = read_uint32(bs, offset, &count);
        if (ret < 0) {
            goto fail;
        } else if (count == 0 || count > info_end - offset) {
            ret = -EINVAL;
            goto fail;
        }
        offset += 4;

        buffer = g_realloc(buffer, count);
        ret = bdrv_pread(bs->file, offset, count, buffer, 0);
        if (ret < 0) {
            goto fail;
        }

        ret = dmg_read_mish_block(s, ds, buffer, count);
        if (ret < 0) {
            goto fail;
        }
        /* advance offset by size of resource */
        offset += count;
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

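/*
 * Fallback for images without a resource fork: the XML property list carries
 * the same mish blocks as base64-encoded <data> elements, which are decoded
 * and handed to dmg_read_mish_block one by one.
 */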
static int GRAPH_RDLOCK
dmg_read_plist_xml(BlockDriverState *bs, DmgHeaderState *ds,
                   uint64_t info_begin, uint64_t info_length)
{
    BDRVDMGState *s = bs->opaque;
    int ret;
    uint8_t *buffer = NULL;
    char *data_begin, *data_end;

    /* Have at least some length to avoid NULL for g_malloc. Attempt to set a
     * safe upper cap on the data length. A test sample had an XML length of
     * about 1 MiB. */
    if (info_length == 0 || info_length > 16 * 1024 * 1024) {
        ret = -EINVAL;
        goto fail;
    }

    buffer = g_malloc(info_length + 1);
    buffer[info_length] = '\0';
    ret = bdrv_pread(bs->file, info_begin, info_length, buffer, 0);
    if (ret < 0) {
        ret = -EINVAL;
        goto fail;
    }

    /* look for <data>...</data>. In a test sample, the data was 284 (0x11c)
     * bytes after base64 decode, while the <data> element itself had 431
     * (0x1af) bytes including tabs and line feeds. */
    data_end = (char *)buffer;
    while ((data_begin = strstr(data_end, "<data>")) != NULL) {
        guchar *mish;
        gsize out_len = 0;

        data_begin += 6;
        data_end = strstr(data_begin, "</data>");
        /* malformed XML? */
        if (data_end == NULL) {
            ret = -EINVAL;
            goto fail;
        }
        *data_end++ = '\0';
        mish = g_base64_decode(data_begin, &out_len);
        ret = dmg_read_mish_block(s, ds, mish, (uint32_t)out_len);
        g_free(mish);
        if (ret < 0) {
            goto fail;
        }
    }
    ret = 0;

fail:
    g_free(buffer);
    return ret;
}

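/*
 * UDIF ("koly") trailer fields consumed below, as byte offsets into the
 * 512-byte trailer: 0x18 DataForkOffset, 0x28 RsrcForkOffset, 0x30 the
 * resource fork length, 0xd8 XMLOffset, 0xe0 the property list length, and
 * 0x1ec the image size in 512-byte sectors.
 */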
static int dmg_open(BlockDriverState *bs, QDict *options, int flags,
                    Error **errp)
{
    BDRVDMGState *s = bs->opaque;
    DmgHeaderState ds;
    uint64_t rsrc_fork_offset, rsrc_fork_length;
    uint64_t plist_xml_offset, plist_xml_length;
    int64_t offset;
    int ret;

    GLOBAL_STATE_CODE();

    bdrv_graph_rdlock_main_loop();
    ret = bdrv_apply_auto_read_only(bs, NULL, errp);
    bdrv_graph_rdunlock_main_loop();
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_open_file_child(NULL, options, "file", bs, errp);
    if (ret < 0) {
        return ret;
    }

    GRAPH_RDLOCK_GUARD_MAINLOOP();

    /*
     * NB: if the uncompress submodules are absent, i.e. the
     * block_module_load return value is 0, the function pointers
     * dmg_uncompress_bz2 and dmg_uncompress_lzfse will be NULL.
     */
    if (block_module_load("dmg-bz2", errp) < 0) {
        return -EINVAL;
    }
    if (block_module_load("dmg-lzfse", errp) < 0) {
        return -EINVAL;
    }

    s->n_chunks = 0;
    s->offsets = s->lengths = s->sectors = s->sectorcounts = NULL;
    /* used by dmg_read_mish_block to keep track of the current I/O position */
    ds.data_fork_offset = 0;
    ds.max_compressed_size = 1;
    ds.max_sectors_per_chunk = 1;

    /* locate the UDIF trailer */
    offset = dmg_find_koly_offset(bs->file, errp);
    if (offset < 0) {
        ret = offset;
        goto fail;
    }

    /* offset of data fork (DataForkOffset) */
    ret = read_uint64(bs, offset + 0x18, &ds.data_fork_offset);
    if (ret < 0) {
        goto fail;
    } else if (ds.data_fork_offset > offset) {
        ret = -EINVAL;
        goto fail;
    }

    /* offset of resource fork (RsrcForkOffset) */
    ret = read_uint64(bs, offset + 0x28, &rsrc_fork_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x30, &rsrc_fork_length);
    if (ret < 0) {
        goto fail;
    }
    if (rsrc_fork_offset >= offset ||
        rsrc_fork_length > offset - rsrc_fork_offset) {
        ret = -EINVAL;
        goto fail;
    }
    /* offset of property list (XMLOffset) */
    ret = read_uint64(bs, offset + 0xd8, &plist_xml_offset);
    if (ret < 0) {
        goto fail;
    }
    ret = read_uint64(bs, offset + 0xe0, &plist_xml_length);
    if (ret < 0) {
        goto fail;
    }
    if (plist_xml_offset >= offset ||
        plist_xml_length > offset - plist_xml_offset) {
        ret = -EINVAL;
        goto fail;
    }
    ret = read_uint64(bs, offset + 0x1ec, (uint64_t *)&bs->total_sectors);
    if (ret < 0) {
        goto fail;
    }
    if (bs->total_sectors < 0) {
        ret = -EINVAL;
        goto fail;
    }
    if (rsrc_fork_length != 0) {
        ret = dmg_read_resource_fork(bs, &ds,
                                     rsrc_fork_offset, rsrc_fork_length);
        if (ret < 0) {
            goto fail;
        }
    } else if (plist_xml_length != 0) {
        ret = dmg_read_plist_xml(bs, &ds, plist_xml_offset, plist_xml_length);
        if (ret < 0) {
            goto fail;
        }
    } else {
        ret = -EINVAL;
        goto fail;
    }

    /* initialize zlib engine */
    s->compressed_chunk = qemu_try_blockalign(bs->file->bs,
                                              ds.max_compressed_size + 1);
    s->uncompressed_chunk = qemu_try_blockalign(bs->file->bs,
                                                512 * ds.max_sectors_per_chunk);
    if (s->compressed_chunk == NULL || s->uncompressed_chunk == NULL) {
        ret = -ENOMEM;
        goto fail;
    }

    if (inflateInit(&s->zstream) != Z_OK) {
        ret = -EINVAL;
        goto fail;
    }

    s->current_chunk = s->n_chunks;

    qemu_co_mutex_init(&s->lock);
    return 0;

fail:
    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);
    return ret;
}

static void dmg_refresh_limits(BlockDriverState *bs, Error **errp)
{
    bs->bl.request_alignment = BDRV_SECTOR_SIZE; /* No sub-sector I/O */
}

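/* Returns nonzero (-1) if sector_num lies inside chunk chunk_num, 0 if not. */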
static inline int is_sector_in_chunk(BDRVDMGState *s,
                                     uint32_t chunk_num, uint64_t sector_num)
{
    if (chunk_num >= s->n_chunks || s->sectors[chunk_num] > sector_num ||
        s->sectors[chunk_num] + s->sectorcounts[chunk_num] <= sector_num) {
        return 0;
    } else {
        return -1;
    }
}

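/*
 * Binary search of the chunk table (sorted by first sector) for the chunk
 * covering sector_num; returns s->n_chunks if no chunk covers it.
 */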
static inline uint32_t search_chunk(BDRVDMGState *s, uint64_t sector_num)
{
    /* binary search */
    uint32_t chunk1 = 0, chunk2 = s->n_chunks, chunk3;
    while (chunk1 <= chunk2) {
        chunk3 = (chunk1 + chunk2) / 2;
        if (s->sectors[chunk3] > sector_num) {
            if (chunk3 == 0) {
                goto err;
            }
            chunk2 = chunk3 - 1;
        } else if (s->sectors[chunk3] + s->sectorcounts[chunk3] > sector_num) {
            return chunk3;
        } else {
            chunk1 = chunk3 + 1;
        }
    }
err:
    return s->n_chunks; /* error */
}

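/*
 * Make the chunk containing sector_num current: read its (possibly
 * compressed) data from the file and decompress it into
 * s->uncompressed_chunk. Zero/ignore chunks are not materialized; the read
 * path zero-fills them instead. Returns 0 on success and a negative value
 * on failure.
 */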
static int coroutine_fn GRAPH_RDLOCK
dmg_read_chunk(BlockDriverState *bs, uint64_t sector_num)
{
    BDRVDMGState *s = bs->opaque;

    if (!is_sector_in_chunk(s, s->current_chunk, sector_num)) {
        int ret;
        uint32_t chunk = search_chunk(s, sector_num);

        if (chunk >= s->n_chunks) {
            return -1;
        }

        s->current_chunk = s->n_chunks;
        switch (s->types[chunk]) { /* block entry type */
        case UDZO: { /* zlib compressed */
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            s->zstream.next_in = s->compressed_chunk;
            s->zstream.avail_in = s->lengths[chunk];
            s->zstream.next_out = s->uncompressed_chunk;
            s->zstream.avail_out = 512 * s->sectorcounts[chunk];
            ret = inflateReset(&s->zstream);
            if (ret != Z_OK) {
                return -1;
            }
            ret = inflate(&s->zstream, Z_FINISH);
            if (ret != Z_STREAM_END ||
                s->zstream.total_out != 512 * s->sectorcounts[chunk]) {
                return -1;
            }
            break; }
        case UDBZ: /* bzip2 compressed */
            if (!dmg_uncompress_bz2) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_bz2((char *)s->compressed_chunk,
                                     (unsigned int) s->lengths[chunk],
                                     (char *)s->uncompressed_chunk,
                                     (unsigned int)
                                         (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case ULFO:
            if (!dmg_uncompress_lzfse) {
                break;
            }
            /* we need to buffer, because only the chunk as a whole can be
             * inflated. */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->compressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }

            ret = dmg_uncompress_lzfse((char *)s->compressed_chunk,
                                       (unsigned int) s->lengths[chunk],
                                       (char *)s->uncompressed_chunk,
                                       (unsigned int)
                                           (512 * s->sectorcounts[chunk]));
            if (ret < 0) {
                return ret;
            }
            break;
        case UDRW: /* copy */
            ret = bdrv_co_pread(bs->file, s->offsets[chunk], s->lengths[chunk],
                                s->uncompressed_chunk, 0);
            if (ret < 0) {
                return -1;
            }
            break;
        case UDZE: /* zeros */
        case UDIG: /* ignore */
            /* see dmg_co_preadv, these chunks are treated specially. No buffer
             * needs to be pre-filled, the zeroes can be set directly. */
            break;
        }
        s->current_chunk = chunk;
    }
    return 0;
}

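/*
 * Read request entry point. Thanks to bs->bl.request_alignment, offset and
 * bytes are sector-aligned; the request is served one 512-byte sector at a
 * time from the chunk loaded by dmg_read_chunk.
 */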
static int coroutine_fn GRAPH_RDLOCK
dmg_co_preadv(BlockDriverState *bs, int64_t offset, int64_t bytes,
              QEMUIOVector *qiov, BdrvRequestFlags flags)
{
    BDRVDMGState *s = bs->opaque;
    uint64_t sector_num = offset >> BDRV_SECTOR_BITS;
    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
    int ret, i;

    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE));

    qemu_co_mutex_lock(&s->lock);

    for (i = 0; i < nb_sectors; i++) {
        uint32_t sector_offset_in_chunk;
        void *data;

        if (dmg_read_chunk(bs, sector_num + i) != 0) {
            ret = -EIO;
            goto fail;
        }
        /* Special case: current chunk is all zeroes. Do not perform a memcpy
         * as s->uncompressed_chunk may be too small to cover the large
         * all-zeroes section. dmg_read_chunk is called to find
         * s->current_chunk. */
        if (s->types[s->current_chunk] == UDZE
            || s->types[s->current_chunk] == UDIG) { /* all zeroes block entry */
            qemu_iovec_memset(qiov, i * 512, 0, 512);
            continue;
        }
        sector_offset_in_chunk = sector_num + i - s->sectors[s->current_chunk];
        data = s->uncompressed_chunk + sector_offset_in_chunk * 512;
        qemu_iovec_from_buf(qiov, i * 512, data, 512);
    }

    ret = 0;
fail:
    qemu_co_mutex_unlock(&s->lock);
    return ret;
}

static void dmg_close(BlockDriverState *bs)
{
    BDRVDMGState *s = bs->opaque;

    g_free(s->types);
    g_free(s->offsets);
    g_free(s->lengths);
    g_free(s->sectors);
    g_free(s->sectorcounts);
    qemu_vfree(s->compressed_chunk);
    qemu_vfree(s->uncompressed_chunk);

    inflateEnd(&s->zstream);
}

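/*
 * The driver is read-only: no write callback is registered, and dmg_open
 * insists on a read-only image via bdrv_apply_auto_read_only.
 */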
static BlockDriver bdrv_dmg = {
    .format_name    = "dmg",
    .instance_size  = sizeof(BDRVDMGState),
    .bdrv_probe     = dmg_probe,
    .bdrv_open      = dmg_open,
    .bdrv_refresh_limits = dmg_refresh_limits,
    .bdrv_child_perm     = bdrv_default_perms,
    .bdrv_co_preadv = dmg_co_preadv,
    .bdrv_close     = dmg_close,
    .is_format      = true,
};

static void bdrv_dmg_init(void)
{
    bdrv_register(&bdrv_dmg);
}

block_init(bdrv_dmg_init);