/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* 100 ms, in ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE (10 << 20)
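
/* Illustrative sizing: with the default 64 KiB dirty-bitmap granularity,
 * the 10 MiB default buffer splits into 160 granularity-sized chunks. */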

/* The mirroring buffer is divided into granularity-sized chunks.
 * Free chunks are organized in a free list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
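
/* Note: MirrorBuffer carries no payload of its own.  While a chunk is free,
 * its first bytes inside s->buf are reinterpreted as this link structure
 * (see mirror_free_init), so the data buffer doubles as its own free list. */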

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

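    /* Chunk/sector conversion: with the default 64 KiB granularity and
     * 512-byte sectors (BDRV_SECTOR_BITS == 9), sectors_per_chunk is 128. */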
    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                    0, mirror_write_complete, op);
}

static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}
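
/* mirror_clip_sectors() example: on a 2048-sector device, a 128-sector
 * request starting at sector 2000 is clipped to the remaining 48 sectors. */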

/* Round sector_num and/or nb_sectors to a target cluster if COW is needed,
 * and return the offset of the adjusted tail sector against the original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may leave align_nb_sectors unaligned to the chunk boundary,
     * but that doesn't matter because it is already the end of the source
     * image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}
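
/* Worked example (illustrative numbers, assuming max_iov allows at least
 * four chunks and no end-of-image clipping): with 64 KiB chunks (128
 * sectors) and a 256 KiB target cluster (512 sectors), a request for
 * sectors [640, 768) inside the unallocated cluster [512, 1024) is widened
 * to the whole cluster, so *sector_num/*nb_sectors become 512/512 and the
 * function returns 1024 - 768 = 256, the distance the tail moved. */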

static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}
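
/* The yield above pairs with mirror_iteration_done(): when an in-flight
 * request completes and finds s->waiting_for_io set, it re-enters the job
 * coroutine. */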

/* Submit async read while handling COW.
 * Returns: nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret = nb_sectors;
    MirrorOp *op;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    assert(nb_sectors);

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is
     * zeroed so the freeing in mirror_iteration_done is a no-op. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_discard(s->target, sector_num, op->nb_sectors,
                        mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = blk_bs(s->common.blk);
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    sector_num = hbitmap_iter_next(&s->hbi);
    if (sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, first_chunk, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t hbitmap_next;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        hbitmap_next = hbitmap_iter_next(&s->hbi);
        if (hbitmap_next > next_sector || hbitmap_next < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(&s->hbi, next_sector);
            hbitmap_next = hbitmap_iter_next(&s->hbi);
        }
        assert(hbitmap_next == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int ret;
        int io_sectors;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = nb_chunks * sectors_per_chunk;
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            break;
        case MIRROR_METHOD_ZERO:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, false);
            break;
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, true);
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
    }
    return delay_ns;
}

static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

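/* Runs in the main loop: mirror_run() schedules it with
 * block_job_defer_to_main_loop() and it performs the switch to the target. */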
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);

    /* Make sure that the source BDS doesn't go away before we have called
     * block_job_completed(). */
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);

        /* We just changed the BDS the job BB refers to */
        blk_remove_bs(job->blk);
        blk_insert_bs(job->blk, src);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_op_unblock_all(target_bs, s->common.blocker);
    blk_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    if (qemu_get_aio_context() == bdrv_get_aio_context(src)) {
        aio_enable_external(iohandler_get_aio_context());
    }
    bdrv_unref(src);
}

static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    int64_t sector_num, end, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;
    int n;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for completion. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
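    /* Example: a qcow2 target with its default 64 KiB cluster size yields
     * target_cluster_sectors == 128 below. */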
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap. */
        BlockDriverState *base = s->base;
        bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(target_bs);

        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding the int limit. */
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1 || mark_all_dirty) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = blk_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can still be submitted by the guest while the
             * mirror job runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_co_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    /* Before we switch to the target in mirror_exit, make sure data doesn't
     * change. */
    bdrv_drained_begin(bs);
    if (qemu_get_aio_context() == bdrv_get_aio_context(bs)) {
        /* FIXME: virtio host notifiers run on iohandler_ctx, therefore the
         * above bdrv_drained_begin isn't enough to quiesce it.  This is ugly,
         * we need a block layer API change to achieve this. */
        aio_disable_external(iohandler_get_aio_context());
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(blk_bs(s->target), NULL, "backing",
                                 &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
        return;
    }

    /* Check that the target bs is not blocked, and block all operations
     * on it. */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size = sizeof(MirrorBlockJob),
    .job_type      = BLOCK_JOB_TYPE_MIRROR,
    .set_speed     = mirror_set_speed,
    .complete      = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size = sizeof(MirrorBlockJob),
    .job_type      = BLOCK_JOB_TYPE_COMMIT,
    .set_speed     = mirror_set_speed,
    .complete      = mirror_complete,
};

static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

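    /* Granularity must be a power of two: (x & (x - 1)) == 0 holds exactly
     * for powers of two (and for zero, which was replaced above). */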
    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->target = blk_new();
    blk_insert_bs(s->target, target);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    bdrv_op_block_all(target, s->common.blocker);

    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}