/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "block/blockjob.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define SLICE_TIME    100000000ULL /* ns */
#define MAX_IN_FLIGHT 16
#define DEFAULT_MIRROR_BUF_SIZE   (10 << 20)

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockBackend *target;
    BlockDriverState *base;
    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    HBitmapIter hbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    unsigned long *in_flight_bitmap;
    int in_flight;
    int sectors_in_flight;
    int ret;
    bool unmap;
    bool waiting_for_io;
    int target_cluster_sectors;
    int max_iov;
} MirrorBlockJob;

typedef struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t sector_num;
    int nb_sectors;
} MirrorOp;

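/* Map a failed source read or target write to the action configured by the
 * job's on-source-error/on-target-error policy, and drop out of the synced
 * state so BLOCK_JOB_READY is reported again once the job recovers. */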
static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

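/* Completion callback shared by reads, writes, zeroes and discards: return
 * the op's buffer chunks to the free list, clear its bits in the in-flight
 * bitmap, update progress, and wake the job coroutine if it is waiting. */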
static void mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks, sectors_per_chunk;

    trace_mirror_iteration_done(s, op->sector_num, op->nb_sectors, ret);

    s->in_flight--;
    s->sectors_in_flight -= op->nb_sectors;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;
    chunk_num = op->sector_num / sectors_per_chunk;
    nb_chunks = DIV_ROUND_UP(op->nb_sectors, sectors_per_chunk);
    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        s->common.offset += (uint64_t)op->nb_sectors * BDRV_SECTOR_SIZE;
    }

    qemu_iovec_destroy(&op->qiov);
    g_free(op);

    if (s->waiting_for_io) {
        qemu_coroutine_enter(s->common.co, NULL);
    }
}

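/* On write failure, re-dirty the range so a later iteration retries it, and
 * record the error if the policy is to report it. */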
static void mirror_write_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }
    mirror_iteration_done(op, ret);
}

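/* A successful read is immediately chained into the corresponding write to
 * the target; a failed read is re-dirtied and retired like a failed write. */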
static void mirror_read_complete(void *opaque, int ret)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->sector_num, op->nb_sectors);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }
    blk_aio_pwritev(s->target, op->sector_num * BDRV_SECTOR_SIZE, &op->qiov,
                    0, mirror_write_complete, op);
}

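/* Clip *nb_sectors so the request does not run past the end of the device. */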
static inline void mirror_clip_sectors(MirrorBlockJob *s,
                                       int64_t sector_num,
                                       int *nb_sectors)
{
    *nb_sectors = MIN(*nb_sectors,
                      s->bdev_length / BDRV_SECTOR_SIZE - sector_num);
}

/* Round sector_num and/or nb_sectors to target cluster if COW is needed, and
 * return the offset of the adjusted tail sector against original. */
static int mirror_cow_align(MirrorBlockJob *s,
                            int64_t *sector_num,
                            int *nb_sectors)
{
    bool need_cow;
    int ret = 0;
    int chunk_sectors = s->granularity >> BDRV_SECTOR_BITS;
    int64_t align_sector_num = *sector_num;
    int align_nb_sectors = *nb_sectors;
    int max_sectors = chunk_sectors * s->max_iov;

    need_cow = !test_bit(*sector_num / chunk_sectors, s->cow_bitmap);
    need_cow |= !test_bit((*sector_num + *nb_sectors - 1) / chunk_sectors,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_sectors_to_clusters(blk_bs(s->target), *sector_num,
                                       *nb_sectors, &align_sector_num,
                                       &align_nb_sectors);
    }

    if (align_nb_sectors > max_sectors) {
        align_nb_sectors = max_sectors;
        if (need_cow) {
            align_nb_sectors = QEMU_ALIGN_DOWN(align_nb_sectors,
                                               s->target_cluster_sectors);
        }
    }
    /* Clipping may result in align_nb_sectors unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    mirror_clip_sectors(s, align_sector_num, &align_nb_sectors);

    ret = align_sector_num + align_nb_sectors - (*sector_num + *nb_sectors);
    *sector_num = align_sector_num;
    *nb_sectors = align_nb_sectors;
    assert(ret >= 0);
    return ret;
}

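/* Yield until an in-flight operation completes; mirror_iteration_done
 * re-enters the job coroutine when waiting_for_io is set. */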
static inline void mirror_wait_for_io(MirrorBlockJob *s)
{
    assert(!s->waiting_for_io);
    s->waiting_for_io = true;
    qemu_coroutine_yield();
    s->waiting_for_io = false;
}

/* Submit async read while handling COW.
 * Returns: nb_sectors if no alignment is necessary, or
 *          (new_end - sector_num) if tail is rounded up or down due to
 *          alignment or buffer limit.
 */
static int mirror_do_read(MirrorBlockJob *s, int64_t sector_num,
                          int nb_sectors)
{
    BlockBackend *source = s->common.blk;
    int sectors_per_chunk, nb_chunks;
    int ret = nb_sectors;
    MirrorOp *op;

    sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    /* We can only handle as much as buf_size at a time. */
    nb_sectors = MIN(s->buf_size >> BDRV_SECTOR_BITS, nb_sectors);
    assert(nb_sectors);

    if (s->cow_bitmap) {
        ret += mirror_cow_align(s, &sector_num, &nb_sectors);
    }
    assert(nb_sectors << BDRV_SECTOR_BITS <= s->buf_size);
    /* The sector range must meet granularity because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(!(sector_num % sectors_per_chunk));
    nb_chunks = DIV_ROUND_UP(nb_sectors, sectors_per_chunk);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, sector_num, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Allocate a MirrorOp that is used as an AIO callback. */
    op = g_new(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = nb_sectors * BDRV_SECTOR_SIZE - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    trace_mirror_one_iteration(s, sector_num, nb_sectors);

    blk_aio_preadv(source, sector_num * BDRV_SECTOR_SIZE, &op->qiov, 0,
                   mirror_read_complete, op);
    return ret;
}

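/* Replicate a zeroed or unallocated range by zeroing or discarding it on the
 * target instead of copying data, reusing the write completion path. */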
static void mirror_do_zero_or_discard(MirrorBlockJob *s,
                                      int64_t sector_num,
                                      int nb_sectors,
                                      bool is_discard)
{
    MirrorOp *op;

    /* Allocate a MirrorOp that is used as an AIO callback. The qiov is zeroed
     * so the freeing in mirror_iteration_done is a nop. */
    op = g_new0(MirrorOp, 1);
    op->s = s;
    op->sector_num = sector_num;
    op->nb_sectors = nb_sectors;

    s->in_flight++;
    s->sectors_in_flight += nb_sectors;
    if (is_discard) {
        blk_aio_discard(s->target, sector_num, op->nb_sectors,
                        mirror_write_complete, op);
    } else {
        blk_aio_pwrite_zeroes(s->target, sector_num * BDRV_SECTOR_SIZE,
                              op->nb_sectors * BDRV_SECTOR_SIZE,
                              s->unmap ? BDRV_REQ_MAY_UNMAP : 0,
                              mirror_write_complete, op);
    }
}

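/* Perform one pass over the dirty bitmap: pick the next run of dirty chunks,
 * decide per extent whether to copy, zero or discard, and submit the I/O.
 * Returns the delay, in nanoseconds, that the caller should sleep to honor
 * the configured rate limit. */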
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = blk_bs(s->common.blk);
    int64_t sector_num, first_chunk;
    uint64_t delay_ns = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    int64_t end = s->bdev_length / BDRV_SECTOR_SIZE;
    int sectors_per_chunk = s->granularity >> BDRV_SECTOR_BITS;

    sector_num = hbitmap_iter_next(&s->hbi);
    if (sector_num < 0) {
        bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
        sector_num = hbitmap_iter_next(&s->hbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(sector_num >= 0);
    }

    first_chunk = sector_num / sectors_per_chunk;
    while (test_bit(first_chunk, s->in_flight_bitmap)) {
        trace_mirror_yield_in_flight(s, first_chunk, s->in_flight);
        mirror_wait_for_io(s);
    }

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    while (nb_chunks * sectors_per_chunk < (s->buf_size >> BDRV_SECTOR_BITS)) {
        int64_t hbitmap_next;
        int64_t next_sector = sector_num + nb_chunks * sectors_per_chunk;
        int64_t next_chunk = next_sector / sectors_per_chunk;
        if (next_sector >= end ||
            !bdrv_get_dirty(source, s->dirty_bitmap, next_sector)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        hbitmap_next = hbitmap_iter_next(&s->hbi);
        if (hbitmap_next > next_sector || hbitmap_next < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(&s->hbi, next_sector);
            hbitmap_next = hbitmap_iter_next(&s->hbi);
        }
        assert(hbitmap_next == next_sector);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_get_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap(s->dirty_bitmap, sector_num,
                            nb_chunks * sectors_per_chunk);
    bitmap_set(s->in_flight_bitmap, sector_num / sectors_per_chunk, nb_chunks);
    while (nb_chunks > 0 && sector_num < end) {
        int ret;
        int io_sectors;
        BlockDriverState *file;
        enum MirrorMethod {
            MIRROR_METHOD_COPY,
            MIRROR_METHOD_ZERO,
            MIRROR_METHOD_DISCARD
        } mirror_method = MIRROR_METHOD_COPY;

        assert(!(sector_num % sectors_per_chunk));
        ret = bdrv_get_block_status_above(source, NULL, sector_num,
                                          nb_chunks * sectors_per_chunk,
                                          &io_sectors, &file);
        if (ret < 0) {
            io_sectors = nb_chunks * sectors_per_chunk;
        }

        io_sectors -= io_sectors % sectors_per_chunk;
        if (io_sectors < sectors_per_chunk) {
            io_sectors = sectors_per_chunk;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_sector_num;
            int target_nb_sectors;
            bdrv_round_sectors_to_clusters(blk_bs(s->target), sector_num,
                                           io_sectors, &target_sector_num,
                                           &target_nb_sectors);
            if (target_sector_num == sector_num &&
                target_nb_sectors == io_sectors) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        mirror_clip_sectors(s, sector_num, &io_sectors);
        switch (mirror_method) {
        case MIRROR_METHOD_COPY:
            io_sectors = mirror_do_read(s, sector_num, io_sectors);
            break;
        case MIRROR_METHOD_ZERO:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, false);
            break;
        case MIRROR_METHOD_DISCARD:
            mirror_do_zero_or_discard(s, sector_num, io_sectors, true);
            break;
        default:
            abort();
        }
        assert(io_sectors);
        sector_num += io_sectors;
        nb_chunks -= DIV_ROUND_UP(io_sectors, sectors_per_chunk);
        delay_ns += ratelimit_calculate_delay(&s->limit, io_sectors);
    }
    return delay_ns;
}

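/* Carve the pre-allocated transfer buffer s->buf into granularity-sized
 * chunks and thread them onto the free list. */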
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

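/* Wait until every operation submitted by the job has completed. */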
static void mirror_drain(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_io(s);
    }
}

typedef struct {
    int ret;
} MirrorExitData;

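/* Runs in the main loop, via block_job_defer_to_main_loop: performs the
 * final graph switch to the target (or to the node named by "replaces")
 * and completes the job. */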
static void mirror_exit(BlockJob *job, void *opaque)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    MirrorExitData *data = opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);

    /* Make sure that the source BDS doesn't go away until we have called
     * block_job_completed(). */
    bdrv_ref(src);

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && data->ret == 0) {
        BlockDriverState *to_replace = src;
        if (s->to_replace) {
            to_replace = s->to_replace;
        }

        if (bdrv_get_flags(target_bs) != bdrv_get_flags(to_replace)) {
            bdrv_reopen(target_bs, bdrv_get_flags(to_replace), NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        bdrv_drained_begin(target_bs);
        bdrv_replace_in_backing_chain(to_replace, target_bs);
        bdrv_drained_end(target_bs);

        /* We just changed the BDS the job BB refers to */
        blk_remove_bs(job->blk);
        blk_insert_bs(job->blk, src);
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_op_unblock_all(target_bs, s->common.blocker);
    blk_unref(s->target);
    block_job_completed(&s->common, data->ret);
    g_free(data);
    bdrv_drained_end(src);
    if (qemu_get_aio_context() == bdrv_get_aio_context(src)) {
        aio_enable_external(iohandler_get_aio_context());
    }
    bdrv_unref(src);
}

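/* The body of the mirror job coroutine: populate the dirty bitmap according
 * to the sync mode, then keep copying dirty data to the target until the
 * job is cancelled or completed. */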
static void coroutine_fn mirror_run(void *opaque)
{
    MirrorBlockJob *s = opaque;
    MirrorExitData *data;
    BlockDriverState *bs = blk_bs(s->common.blk);
    BlockDriverState *target_bs = blk_bs(s->target);
    int64_t sector_num, end, length;
    uint64_t last_pause_ns;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for an empty string */
    int ret = 0;
    int n;
    int target_cluster_size = BDRV_SECTOR_SIZE;

    if (block_job_is_cancelled(&s->common)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    } else if (s->bdev_length == 0) {
        /* Report BLOCK_JOB_READY and wait for complete. */
        block_job_event_ready(&s->common);
        s->synced = true;
        while (!block_job_is_cancelled(&s->common) && !s->should_complete) {
            block_job_yield(&s->common);
        }
        s->common.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        target_cluster_size = bdi.cluster_size;
    }
    if (backing_filename[0] && !target_bs->backing
        && s->granularity < target_cluster_size) {
        s->buf_size = MAX(s->buf_size, target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->target_cluster_sectors = target_cluster_size >> BDRV_SECTOR_BITS;
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    end = s->bdev_length / BDRV_SECTOR_SIZE;
    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        /* First part, loop on the sectors and initialize the dirty bitmap.  */
        BlockDriverState *base = s->base;
        bool mark_all_dirty = s->base == NULL && !bdrv_has_zero_init(target_bs);

        for (sector_num = 0; sector_num < end; ) {
            /* Just to make sure we are not exceeding int limit. */
            int nb_sectors = MIN(INT_MAX >> BDRV_SECTOR_BITS,
                                 end - sector_num);
            int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

            if (now - last_pause_ns > SLICE_TIME) {
                last_pause_ns = now;
                block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, 0);
            }

            if (block_job_is_cancelled(&s->common)) {
                goto immediate_exit;
            }

            ret = bdrv_is_allocated_above(bs, base, sector_num, nb_sectors, &n);

            if (ret < 0) {
                goto immediate_exit;
            }

            assert(n > 0);
            if (ret == 1 || mark_all_dirty) {
                bdrv_set_dirty_bitmap(s->dirty_bitmap, sector_num, n);
            }
            sector_num += n;
        }
    }

    bdrv_dirty_iter_init(s->dirty_bitmap, &s->hbi);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt;
        bool should_complete;

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* s->common.offset contains the number of bytes already processed so
         * far, cnt is the number of dirty sectors remaining and
         * s->sectors_in_flight is the number of sectors currently being
         * processed; together those are the current total operation length */
        s->common.len = s->common.offset +
                        (cnt + s->sectors_in_flight) * BDRV_SECTOR_SIZE;

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every SLICE_TIME nanoseconds, or when there is an error,
         * or when the source is clean, whichever comes first.
         */
        if (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - last_pause_ns < SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight == MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, s->in_flight, s->buf_free_count, cnt);
                mirror_wait_for_io(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            ret = blk_flush(s->target);
            if (ret < 0) {
                if (mirror_error_action(s, false, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    goto immediate_exit;
                }
            } else {
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                if (!s->synced) {
                    block_job_event_ready(&s->common);
                    s->synced = true;
                }

                should_complete = s->should_complete ||
                    block_job_is_cancelled(&s->common);
                cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            }
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs.
             */
            trace_mirror_before_drain(s, cnt);
            bdrv_co_drain(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        ret = 0;
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        if (!s->synced) {
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
            if (block_job_is_cancelled(&s->common)) {
                break;
            }
        } else if (!should_complete) {
            delay_ns = (s->in_flight == 0 && cnt == 0 ? SLICE_TIME : 0);
            block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns);
        } else if (cnt == 0) {
            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.cancelled = false;
            break;
        }
        last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || (!s->synced && block_job_is_cancelled(&s->common)));
        mirror_drain(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    /* Before we switch to target in mirror_exit, make sure data doesn't
     * change. */
    bdrv_drained_begin(bs);
    if (qemu_get_aio_context() == bdrv_get_aio_context(bs)) {
        /* FIXME: virtio host notifiers run on iohandler_ctx, therefore the
         * above bdrv_drained_begin isn't enough to quiesce it. This is ugly,
         * we need a block layer API change to achieve this. */
        aio_disable_external(iohandler_get_aio_context());
    }
    block_job_defer_to_main_loop(&s->common, mirror_exit, data);
}

static void mirror_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

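/* Implements block-job-complete: install a backing file on the target if
 * necessary, block the replacement node, and ask the job to finish. */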
static void mirror_complete(BlockJob *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);
    Error *local_err = NULL;
    int ret;

    ret = bdrv_open_backing_file(blk_bs(s->target), NULL, "backing",
                                 &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return;
    }
    if (!s->synced) {
        error_setg(errp, QERR_BLOCK_JOB_NOT_READY, job->id);
        return;
    }

    /* check the target bs is not blocked and block all operations on it */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    block_job_enter(&s->common);
}

static const BlockJobDriver mirror_job_driver = {
    .instance_size = sizeof(MirrorBlockJob),
    .job_type      = BLOCK_JOB_TYPE_MIRROR,
    .set_speed     = mirror_set_speed,
    .complete      = mirror_complete,
};

static const BlockJobDriver commit_active_job_driver = {
    .instance_size = sizeof(MirrorBlockJob),
    .job_type      = BLOCK_JOB_TYPE_COMMIT,
    .set_speed     = mirror_set_speed,
    .complete      = mirror_complete,
};

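/* Common setup shared by drive-mirror and active commit: validate the
 * granularity and buffer size, create the job and its dirty bitmap, attach
 * the target, and kick off the mirror_run coroutine. */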
static void mirror_start_job(BlockDriverState *bs, BlockDriverState *target,
                             const char *replaces,
                             int64_t speed, uint32_t granularity,
                             int64_t buf_size,
                             BlockdevOnError on_source_error,
                             BlockdevOnError on_target_error,
                             bool unmap,
                             BlockCompletionFunc *cb,
                             void *opaque, Error **errp,
                             const BlockJobDriver *driver,
                             bool is_none_mode, BlockDriverState *base)
{
    MirrorBlockJob *s;

    if (granularity == 0) {
        granularity = bdrv_get_default_bitmap_granularity(target);
    }

    assert((granularity & (granularity - 1)) == 0);

    if (buf_size < 0) {
        error_setg(errp, "Invalid parameter 'buf-size'");
        return;
    }

    if (buf_size == 0) {
        buf_size = DEFAULT_MIRROR_BUF_SIZE;
    }

    s = block_job_create(driver, bs, speed, cb, opaque, errp);
    if (!s) {
        return;
    }

    s->target = blk_new();
    blk_insert_bs(s->target, target);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->base = base;
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        g_free(s->replaces);
        blk_unref(s->target);
        block_job_unref(&s->common);
        return;
    }

    bdrv_op_block_all(target, s->common.blocker);

    s->common.co = qemu_coroutine_create(mirror_run);
    trace_mirror_start(bs, s, s->common.co, opaque);
    qemu_coroutine_enter(s->common.co, s);
}

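/* Entry point for drive-mirror: the sync mode selects the base used for the
 * initial copy phase before mirror_start_job takes over. */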
void mirror_start(BlockDriverState *bs, BlockDriverState *target,
                  const char *replaces,
                  int64_t speed, uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap,
                  BlockCompletionFunc *cb,
                  void *opaque, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        error_setg(errp, "Sync mode 'incremental' not supported");
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
    mirror_start_job(bs, target, replaces,
                     speed, granularity, buf_size,
                     on_source_error, on_target_error, unmap, cb, opaque, errp,
                     &mirror_job_driver, is_none_mode, base);
}

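/* Active commit reuses the mirror machinery with the base image as the
 * target: the base is reopened with the top's flags and resized if the top
 * image is larger, then a mirror job copies the top's data into it. */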
void commit_active_start(BlockDriverState *bs, BlockDriverState *base,
                         int64_t speed,
                         BlockdevOnError on_error,
                         BlockCompletionFunc *cb,
                         void *opaque, Error **errp)
{
    int64_t length, base_length;
    int orig_base_flags;
    int ret;
    Error *local_err = NULL;

    orig_base_flags = bdrv_get_flags(base);

    if (bdrv_reopen(base, bs->open_flags, errp)) {
        return;
    }

    length = bdrv_getlength(bs);
    if (length < 0) {
        error_setg_errno(errp, -length,
                         "Unable to determine length of %s", bs->filename);
        goto error_restore_flags;
    }

    base_length = bdrv_getlength(base);
    if (base_length < 0) {
        error_setg_errno(errp, -base_length,
                         "Unable to determine length of %s", base->filename);
        goto error_restore_flags;
    }

    if (length > base_length) {
        ret = bdrv_truncate(base, length);
        if (ret < 0) {
            error_setg_errno(errp, -ret,
                             "Top image %s is larger than base image %s, and "
                             "resize of base image failed",
                             bs->filename, base->filename);
            goto error_restore_flags;
        }
    }

    mirror_start_job(bs, base, NULL, speed, 0, 0,
                     on_error, on_error, false, cb, opaque, &local_err,
                     &commit_active_job_driver, false, base);
    if (local_err) {
        error_propagate(errp, local_err);
        goto error_restore_flags;
    }

    return;

error_restore_flags:
    /* ignore error and errp for bdrv_reopen, because we want to propagate
     * the original error */
    bdrv_reopen(base, orig_base_flags, NULL);
    return;
}