vmdk: Fix next_cluster_sector for compressed write

This fixes the bug introduced by commit c6ac36e (vmdk: Optimize cluster
allocation).

Sometimes, write_len could be larger than the cluster size, because it
contains both the data and the grain marker. We must advance
next_cluster_sector in this case; otherwise, the image gets corrupted.
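
For illustration only, not part of the patch: a minimal standalone
sketch of the end-sector arithmetic, assuming 512-byte sectors.
SECTOR_SIZE, DIV_ROUND_UP, MAX, and next_free_sector are local
stand-ins here, not the QEMU definitions.

    #include <stdint.h>

    #define SECTOR_SIZE 512
    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))
    #define MAX(a, b) ((a) > (b) ? (a) : (b))

    /* Illustration only: a compressed grain carries data plus a grain
     * marker, so the write can spill past the cluster boundary.  Round
     * the end offset up to the next sector and never move the
     * allocation pointer backwards. */
    static int64_t next_free_sector(int64_t next_cluster_sector,
                                    int64_t write_offset, int write_len)
    {
        int64_t end = DIV_ROUND_UP(write_offset + write_len, SECTOR_SIZE);
        return MAX(next_cluster_sector, end);
    }

For example, if the grain plus its marker ends one byte into sector N,
the rounded-up end is N + 1, and taking the maximum ensures the
allocation pointer never moves backwards.
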
Cc: qemu-stable@nongnu.org
Reported-by: Antoni Villalonga <qemu-list@friki.cat>
Signed-off-by: Fam Zheng <famz@redhat.com>
Reviewed-by: Max Reitz <mreitz@redhat.com>
Signed-off-by: Kevin Wolf <kwolf@redhat.com>
diff --git a/block/vmdk.c b/block/vmdk.c
index 1c5e2ef..4b4a862 100644
--- a/block/vmdk.c
+++ b/block/vmdk.c
@@ -1302,6 +1302,8 @@
     uLongf buf_len;
     const uint8_t *write_buf = buf;
     int write_len = nb_sectors * 512;
+    int64_t write_offset;
+    int64_t write_end_sector;
 
     if (extent->compressed) {
         if (!extent->has_marker) {
@@ -1320,10 +1322,14 @@
         write_buf = (uint8_t *)data;
         write_len = buf_len + sizeof(VmdkGrainMarker);
     }
-    ret = bdrv_pwrite(extent->file,
-                      cluster_offset + offset_in_cluster,
-                      write_buf,
-                      write_len);
+    write_offset = cluster_offset + offset_in_cluster;
+    ret = bdrv_pwrite(extent->file, write_offset, write_buf, write_len);
+
+    write_end_sector = DIV_ROUND_UP(write_offset + write_len, BDRV_SECTOR_SIZE);
+
+    extent->next_cluster_sector = MAX(extent->next_cluster_sector,
+                                      write_end_sector);
+
     if (ret != write_len) {
         ret = ret < 0 ? ret : -EIO;
         goto out;