/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
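/* With the macros above, the default buffer works out to 16 * 1 MiB = 16 MiB. */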

/* The mirroring buffer is carved into granularity-sized chunks.
 * Free chunks are kept on a singly-linked list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;
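/* The MirrorBuffer link is overlaid on the first bytes of each free chunk
 * of s->buf; see mirror_free_init() below. */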

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;
    BlockDriverState *base_overlay;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    /* Whether the target image requires explicit zero-initialization */
    bool zero_target;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
    bool is_commit;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    bool is_in_flight;
    CoQueue waiting_requests;
    Coroutine *co;
    MirrorOp *waiting_for_op;

    QTAILQ_ENTRY(MirrorOp) next;
};

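/* How one extent is transferred to the target; mirror_iteration() picks
 * the method based on the block status of the source. */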
typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

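/* Wait until no in-flight operation overlaps the granularity-sized chunks
 * covered by [offset, offset + bytes). @self may be NULL when the caller is
 * not itself tracked as an operation (e.g. the pseudo op set up in
 * mirror_iteration()). */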
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                if (self) {
                    /*
                     * If the operation is already (indirectly) waiting for us,
                     * or will wait for us as soon as it wakes up, then just go
                     * on (instead of producing a deadlock in the former case).
                     */
                    if (op->waiting_for_op) {
                        continue;
                    }

                    self->waiting_for_op = op;
                }

                qemu_co_queue_wait(&op->waiting_requests, NULL);

                if (self) {
                    self->waiting_for_op = NULL;
                }

                break;
            }
        }
    }
}

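/* Finish one copy/zero/discard operation: return its buffer chunks to the
 * free list, clear its bits in the in-flight bitmap, update the job
 * progress, and wake any requests waiting on it. */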
static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
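/* For example (hypothetical numbers): with s->bdev_length == 1000, a
 * request for 200 bytes at offset 900 is clipped to 100 bytes. */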
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return by how much the tail was moved relative to the original request. */
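/* For instance (hypothetical numbers, assuming the max_bytes clamp does not
 * apply): with a 64 KiB target cluster, a request for [68 KiB, 72 KiB)
 * whose chunks still need COW is rounded out to the full clusters
 * [64 KiB, 128 KiB), and the return value is the tail adjustment
 * (128 - 72) KiB = 56 KiB. */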
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes being unaligned to the chunk
     * boundary, but that doesn't matter because it's already the end of the
     * source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because they may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function. Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_in_flight &&
            op->is_active_write == active)
        {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment. This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster. */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    op->is_in_flight = true;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;
    op->is_in_flight = true;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

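/* Kick off a single copy, zero or discard operation in its own coroutine
 * and return how many bytes, counted from @offset, it takes care of. */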
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .bytes_handled = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }
    op->co = co;

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

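/* Mirror one run of consecutive dirty chunks. Returns the delay, in
 * nanoseconds, that the rate limiter requests before the next iteration
 * (0 on error or when no throttling is needed). */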
static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for any in-flight requests within them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_dirty_bitmap_get_locked(s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it. mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset = offset,
        .bytes = nb_chunks * s->granularity,
        .is_pseudo_op = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

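/* Carve s->buf into granularity-sized chunks and put all of them on the
 * buf_free list; the MirrorBuffer list entry lives in the first bytes of
 * each chunk. */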
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

John Snow | 737efc1 | 2018-09-06 09:02:15 -0400 | [diff] [blame] | 643 | /** |
| 644 | * mirror_exit_common: handle both abort() and prepare() cases. |
| 645 | * for .prepare, returns 0 on success and -errno on failure. |
| 646 | * for .abort cases, denoted by abort = true, MUST return 0. |
| 647 | */ |
| 648 | static int mirror_exit_common(Job *job) |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 649 | { |
Kevin Wolf | 1908a55 | 2018-04-17 16:41:17 +0200 | [diff] [blame] | 650 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); |
| 651 | BlockJob *bjob = &s->common; |
Max Reitz | f93c3ad | 2019-10-14 17:39:28 +0200 | [diff] [blame] | 652 | MirrorBDSOpaque *bs_opaque; |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 653 | AioContext *replace_aio_context = NULL; |
Max Reitz | f93c3ad | 2019-10-14 17:39:28 +0200 | [diff] [blame] | 654 | BlockDriverState *src; |
| 655 | BlockDriverState *target_bs; |
| 656 | BlockDriverState *mirror_top_bs; |
Kevin Wolf | 12fa4af | 2017-02-17 20:42:32 +0100 | [diff] [blame] | 657 | Error *local_err = NULL; |
John Snow | 737efc1 | 2018-09-06 09:02:15 -0400 | [diff] [blame] | 658 | bool abort = job->ret < 0; |
| 659 | int ret = 0; |
| 660 | |
| 661 | if (s->prepared) { |
| 662 | return 0; |
| 663 | } |
| 664 | s->prepared = true; |
Kevin Wolf | 3f09bfb | 2015-09-15 11:58:23 +0200 | [diff] [blame] | 665 | |
Max Reitz | f93c3ad | 2019-10-14 17:39:28 +0200 | [diff] [blame] | 666 | mirror_top_bs = s->mirror_top_bs; |
| 667 | bs_opaque = mirror_top_bs->opaque; |
| 668 | src = mirror_top_bs->backing->bs; |
| 669 | target_bs = blk_bs(s->target); |
| 670 | |
Alberto Garcia | ef53dc0 | 2019-03-12 18:48:42 +0200 | [diff] [blame] | 671 | if (bdrv_chain_contains(src, target_bs)) { |
| 672 | bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs); |
| 673 | } |
| 674 | |
Vladimir Sementsov-Ogievskiy | 5deb6cb | 2019-09-16 17:19:09 +0300 | [diff] [blame] | 675 | bdrv_release_dirty_bitmap(s->dirty_bitmap); |
Paolo Bonzini | 2119882 | 2017-06-05 14:39:03 +0200 | [diff] [blame] | 676 | |
John Snow | 7b508f6 | 2018-08-29 21:57:30 -0400 | [diff] [blame] | 677 | /* Make sure that the source BDS doesn't go away during bdrv_replace_node, |
| 678 | * before we can call bdrv_drained_end */ |
Kevin Wolf | 3f09bfb | 2015-09-15 11:58:23 +0200 | [diff] [blame] | 679 | bdrv_ref(src); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 680 | bdrv_ref(mirror_top_bs); |
Kevin Wolf | 7d9fcb3 | 2017-03-02 17:48:14 +0100 | [diff] [blame] | 681 | bdrv_ref(target_bs); |
| 682 | |
Vladimir Sementsov-Ogievskiy | bb0c940 | 2019-08-29 12:09:53 +0300 | [diff] [blame] | 683 | /* |
| 684 | * Remove target parent that still uses BLK_PERM_WRITE/RESIZE before |
Kevin Wolf | 7d9fcb3 | 2017-03-02 17:48:14 +0100 | [diff] [blame] | 685 | * inserting target_bs at s->to_replace, where we might not be able to get |
Kevin Wolf | 63c8ef2 | 2017-05-29 14:08:32 +0200 | [diff] [blame] | 686 | * these permissions. |
Vladimir Sementsov-Ogievskiy | bb0c940 | 2019-08-29 12:09:53 +0300 | [diff] [blame] | 687 | */ |
Kevin Wolf | 7d9fcb3 | 2017-03-02 17:48:14 +0100 | [diff] [blame] | 688 | blk_unref(s->target); |
| 689 | s->target = NULL; |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 690 | |
| 691 | /* We don't access the source any more. Dropping any WRITE/RESIZE is |
Kevin Wolf | d2da5e2 | 2019-07-22 17:44:27 +0200 | [diff] [blame] | 692 | * required before it could become a backing file of target_bs. Not having |
| 693 | * these permissions any more means that we can't allow any new requests on |
| 694 | * mirror_top_bs from now on, so keep it drained. */ |
| 695 | bdrv_drained_begin(mirror_top_bs); |
Max Reitz | f94dc3b | 2019-05-22 19:03:47 +0200 | [diff] [blame] | 696 | bs_opaque->stop = true; |
| 697 | bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing, |
| 698 | &error_abort); |
John Snow | 737efc1 | 2018-09-06 09:02:15 -0400 | [diff] [blame] | 699 | if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) { |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 700 | BlockDriverState *backing = s->is_none_mode ? src : s->base; |
Max Reitz | 3f072a7 | 2019-06-12 16:27:32 +0200 | [diff] [blame] | 701 | BlockDriverState *unfiltered_target = bdrv_skip_filters(target_bs); |
| 702 | |
| 703 | if (bdrv_cow_bs(unfiltered_target) != backing) { |
| 704 | bdrv_set_backing_hd(unfiltered_target, backing, &local_err); |
Kevin Wolf | 12fa4af | 2017-02-17 20:42:32 +0100 | [diff] [blame] | 705 | if (local_err) { |
| 706 | error_report_err(local_err); |
Vladimir Sementsov-Ogievskiy | 66c8672 | 2020-03-24 18:36:26 +0300 | [diff] [blame] | 707 | local_err = NULL; |
John Snow | 7b508f6 | 2018-08-29 21:57:30 -0400 | [diff] [blame] | 708 | ret = -EPERM; |
Kevin Wolf | 12fa4af | 2017-02-17 20:42:32 +0100 | [diff] [blame] | 709 | } |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 710 | } |
Max Reitz | c41f5b9 | 2021-04-09 14:04:18 +0200 | [diff] [blame] | 711 | } else if (!abort && s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) { |
| 712 | assert(!bdrv_backing_chain_next(target_bs)); |
| 713 | ret = bdrv_open_backing_file(bdrv_skip_filters(target_bs), NULL, |
| 714 | "backing", &local_err); |
| 715 | if (ret < 0) { |
| 716 | error_report_err(local_err); |
| 717 | local_err = NULL; |
| 718 | } |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 719 | } |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 720 | |
| 721 | if (s->to_replace) { |
| 722 | replace_aio_context = bdrv_get_aio_context(s->to_replace); |
| 723 | aio_context_acquire(replace_aio_context); |
| 724 | } |
| 725 | |
John Snow | 737efc1 | 2018-09-06 09:02:15 -0400 | [diff] [blame] | 726 | if (s->should_complete && !abort) { |
| 727 | BlockDriverState *to_replace = s->to_replace ?: src; |
Alberto Garcia | 1ba7938 | 2018-11-12 16:00:40 +0200 | [diff] [blame] | 728 | bool ro = bdrv_is_read_only(to_replace); |
Kevin Wolf | 4036555 | 2015-10-28 13:24:26 +0100 | [diff] [blame] | 729 | |
Alberto Garcia | 1ba7938 | 2018-11-12 16:00:40 +0200 | [diff] [blame] | 730 | if (ro != bdrv_is_read_only(target_bs)) { |
| 731 | bdrv_reopen_set_read_only(target_bs, ro, NULL); |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 732 | } |
Kevin Wolf | b880481 | 2016-04-12 16:20:59 +0200 | [diff] [blame] | 733 | |
| 734 | /* The mirror job has no requests in flight any more, but we need to |
| 735 | * drain potential other users of the BDS before changing the graph. */ |
Sergio Lopez | 5e77175 | 2019-03-08 16:48:53 +0100 | [diff] [blame] | 736 | assert(s->in_drain); |
Kevin Wolf | e253f4b | 2016-04-12 16:17:41 +0200 | [diff] [blame] | 737 | bdrv_drained_begin(target_bs); |
Max Reitz | 6e9cc05 | 2020-02-18 11:34:46 +0100 | [diff] [blame] | 738 | /* |
| 739 | * Cannot use check_to_replace_node() here, because that would |
| 740 | * check for an op blocker on @to_replace, and we have our own |
| 741 | * there. |
| 742 | */ |
| 743 | if (bdrv_recurse_can_replace(src, to_replace)) { |
| 744 | bdrv_replace_node(to_replace, target_bs, &local_err); |
| 745 | } else { |
| 746 | error_setg(&local_err, "Can no longer replace '%s' by '%s', " |
| 747 | "because it can no longer be guaranteed that doing so " |
| 748 | "would not lead to an abrupt change of visible data", |
| 749 | to_replace->node_name, target_bs->node_name); |
| 750 | } |
Kevin Wolf | e253f4b | 2016-04-12 16:17:41 +0200 | [diff] [blame] | 751 | bdrv_drained_end(target_bs); |
Kevin Wolf | 5fe31c2 | 2017-03-06 16:20:51 +0100 | [diff] [blame] | 752 | if (local_err) { |
| 753 | error_report_err(local_err); |
John Snow | 7b508f6 | 2018-08-29 21:57:30 -0400 | [diff] [blame] | 754 | ret = -EPERM; |
Kevin Wolf | 5fe31c2 | 2017-03-06 16:20:51 +0100 | [diff] [blame] | 755 | } |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 756 | } |
| 757 | if (s->to_replace) { |
| 758 | bdrv_op_unblock_all(s->to_replace, s->replace_blocker); |
| 759 | error_free(s->replace_blocker); |
| 760 | bdrv_unref(s->to_replace); |
| 761 | } |
| 762 | if (replace_aio_context) { |
| 763 | aio_context_release(replace_aio_context); |
| 764 | } |
| 765 | g_free(s->replaces); |
Kevin Wolf | 7d9fcb3 | 2017-03-02 17:48:14 +0100 | [diff] [blame] | 766 | bdrv_unref(target_bs); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 767 | |
Max Reitz | f94dc3b | 2019-05-22 19:03:47 +0200 | [diff] [blame] | 768 | /* |
| 769 | * Remove the mirror filter driver from the graph. Before this, get rid of |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 770 | * the blockers on the intermediate nodes so that the resulting state is |
Max Reitz | f94dc3b | 2019-05-22 19:03:47 +0200 | [diff] [blame] | 771 | * valid. |
| 772 | */ |
Kevin Wolf | 1908a55 | 2018-04-17 16:41:17 +0200 | [diff] [blame] | 773 | block_job_remove_all_bdrv(bjob); |
Max Reitz | 3f072a7 | 2019-06-12 16:27:32 +0200 | [diff] [blame] | 774 | bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 775 | |
| 776 | /* We just changed the BDS the job BB refers to (with either or both of the |
Kevin Wolf | 5fe31c2 | 2017-03-06 16:20:51 +0100 | [diff] [blame] | 777 | * bdrv_replace_node() calls), so switch the BB back so the cleanup does |
| 778 | * the right thing. We don't need any permissions any more now. */ |
Kevin Wolf | 1908a55 | 2018-04-17 16:41:17 +0200 | [diff] [blame] | 779 | blk_remove_bs(bjob->blk); |
| 780 | blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort); |
| 781 | blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 782 | |
Max Reitz | 429076e | 2018-06-13 20:18:19 +0200 | [diff] [blame] | 783 | bs_opaque->job = NULL; |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 784 | |
Fam Zheng | 176c369 | 2015-11-23 10:28:04 +0800 | [diff] [blame] | 785 | bdrv_drained_end(src); |
Kevin Wolf | d2da5e2 | 2019-07-22 17:44:27 +0200 | [diff] [blame] | 786 | bdrv_drained_end(mirror_top_bs); |
Sergio Lopez | 5e77175 | 2019-03-08 16:48:53 +0100 | [diff] [blame] | 787 | s->in_drain = false; |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 788 | bdrv_unref(mirror_top_bs); |
Kevin Wolf | 3f09bfb | 2015-09-15 11:58:23 +0200 | [diff] [blame] | 789 | bdrv_unref(src); |
John Snow | 7b508f6 | 2018-08-29 21:57:30 -0400 | [diff] [blame] | 790 | |
John Snow | 737efc1 | 2018-09-06 09:02:15 -0400 | [diff] [blame] | 791 | return ret; |
| 792 | } |
| 793 | |
| 794 | static int mirror_prepare(Job *job) |
| 795 | { |
| 796 | return mirror_exit_common(job); |
| 797 | } |
| 798 | |
| 799 | static void mirror_abort(Job *job) |
| 800 | { |
| 801 | int ret = mirror_exit_common(job); |
| 802 | assert(ret == 0); |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 803 | } |
| 804 | |
Stefan Hajnoczi | 537c3d4 | 2018-12-13 11:24:34 +0000 | [diff] [blame] | 805 | static void coroutine_fn mirror_throttle(MirrorBlockJob *s) |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 806 | { |
| 807 | int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
| 808 | |
Kevin Wolf | 18bb692 | 2018-01-18 20:25:40 +0100 | [diff] [blame] | 809 | if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) { |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 810 | s->last_pause_ns = now; |
Kevin Wolf | 5d43e86 | 2018-04-18 16:32:20 +0200 | [diff] [blame] | 811 | job_sleep_ns(&s->common.job, 0); |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 812 | } else { |
Kevin Wolf | da01ff7 | 2018-04-13 17:31:02 +0200 | [diff] [blame] | 813 | job_pause_point(&s->common.job); |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 814 | } |
| 815 | } |
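/*
 * A minimal sketch (illustrative only) of how callers use mirror_throttle():
 * invoke it once per iteration of a long-running loop, so the coroutine
 * yields roughly every BLOCK_JOB_SLICE_TIME nanoseconds and honours pause
 * requests in between.  The real callers are mirror_dirty_init() and
 * mirror_run(); the loop shape below is not part of this file:
 *
 *     for (offset = 0; offset < s->bdev_length; offset += bytes) {
 *         mirror_throttle(s);
 *         if (job_is_cancelled(&s->common.job)) {
 *             return 0;
 *         }
 *         // ... do one chunk of work ...
 *     }
 */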
| 816 | |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 817 | static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s) |
| 818 | { |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 819 | int64_t offset; |
Max Reitz | 138f9ff | 2018-06-13 20:18:14 +0200 | [diff] [blame] | 820 | BlockDriverState *bs = s->mirror_top_bs->backing->bs; |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 821 | BlockDriverState *target_bs = blk_bs(s->target); |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 822 | int ret; |
Eric Blake | 51b0a48 | 2017-07-07 07:44:59 -0500 | [diff] [blame] | 823 | int64_t count; |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 824 | |
Max Reitz | cdf3bc9 | 2019-07-24 19:12:30 +0200 | [diff] [blame] | 825 | if (s->zero_target) { |
Denis V. Lunev | c7c2769 | 2016-07-14 16:33:28 +0300 | [diff] [blame] | 826 | if (!bdrv_can_write_zeroes_with_unmap(target_bs)) { |
Eric Blake | e0d7f73 | 2017-09-25 09:55:20 -0500 | [diff] [blame] | 827 | bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length); |
Denis V. Lunev | c7c2769 | 2016-07-14 16:33:28 +0300 | [diff] [blame] | 828 | return 0; |
| 829 | } |
| 830 | |
Anton Nefedov | 90ab48e | 2017-02-02 17:25:15 +0300 | [diff] [blame] | 831 | s->initial_zeroing_ongoing = true; |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 832 | for (offset = 0; offset < s->bdev_length; ) { |
| 833 | int bytes = MIN(s->bdev_length - offset, |
| 834 | QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); |
Denis V. Lunev | c7c2769 | 2016-07-14 16:33:28 +0300 | [diff] [blame] | 835 | |
| 836 | mirror_throttle(s); |
| 837 | |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 838 | if (job_is_cancelled(&s->common.job)) { |
Anton Nefedov | 90ab48e | 2017-02-02 17:25:15 +0300 | [diff] [blame] | 839 | s->initial_zeroing_ongoing = false; |
Denis V. Lunev | c7c2769 | 2016-07-14 16:33:28 +0300 | [diff] [blame] | 840 | return 0; |
| 841 | } |
| 842 | |
| 843 | if (s->in_flight >= MAX_IN_FLIGHT) { |
Eric Blake | 67adf4b | 2017-03-13 14:55:18 -0500 | [diff] [blame] | 844 | trace_mirror_yield(s, UINT64_MAX, s->buf_free_count, |
| 845 | s->in_flight); |
Kevin Wolf | 9178f4f | 2020-03-26 16:36:27 +0100 | [diff] [blame] | 846 | mirror_wait_for_free_in_flight_slot(s); |
Denis V. Lunev | c7c2769 | 2016-07-14 16:33:28 +0300 | [diff] [blame] | 847 | continue; |
| 848 | } |
| 849 | |
Max Reitz | 4295c5f | 2018-06-13 20:18:10 +0200 | [diff] [blame] | 850 | mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO); |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 851 | offset += bytes; |
Denis V. Lunev | c7c2769 | 2016-07-14 16:33:28 +0300 | [diff] [blame] | 852 | } |
| 853 | |
Paolo Bonzini | bae8196 | 2016-10-27 12:48:50 +0200 | [diff] [blame] | 854 | mirror_wait_for_all_io(s); |
Anton Nefedov | 90ab48e | 2017-02-02 17:25:15 +0300 | [diff] [blame] | 855 | s->initial_zeroing_ongoing = false; |
Denis V. Lunev | b7d5062 | 2016-07-14 16:33:27 +0300 | [diff] [blame] | 856 | } |
| 857 | |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] |  858 |     /* Loop over the image and initialize the dirty bitmap. */ |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 859 | for (offset = 0; offset < s->bdev_length; ) { |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] |  860 |         /* Just to make sure we are not exceeding the int limit. */ |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 861 | int bytes = MIN(s->bdev_length - offset, |
| 862 | QEMU_ALIGN_DOWN(INT_MAX, s->granularity)); |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 863 | |
| 864 | mirror_throttle(s); |
| 865 | |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 866 | if (job_is_cancelled(&s->common.job)) { |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 867 | return 0; |
| 868 | } |
| 869 | |
Max Reitz | 3f072a7 | 2019-06-12 16:27:32 +0200 | [diff] [blame] | 870 | ret = bdrv_is_allocated_above(bs, s->base_overlay, true, offset, bytes, |
| 871 | &count); |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 872 | if (ret < 0) { |
| 873 | return ret; |
| 874 | } |
| 875 | |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 876 | assert(count); |
Eric Blake | a92b1b0 | 2020-10-27 00:05:53 -0500 | [diff] [blame] | 877 | if (ret > 0) { |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 878 | bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count); |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 879 | } |
Eric Blake | 23ca459 | 2017-09-25 09:55:21 -0500 | [diff] [blame] | 880 | offset += count; |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 881 | } |
| 882 | return 0; |
| 883 | } |
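/*
 * Worked example for the allocation scan above (illustrative numbers):
 * with s->bdev_length = 10 GiB and a chain where only the first 1 GiB is
 * allocated above s->base_overlay, bdrv_is_allocated_above() might first
 * return ret > 0 with count = 1 GiB (those bits get set in the dirty
 * bitmap), then ret == 0 for the rest, queried in chunks of at most
 * QEMU_ALIGN_DOWN(INT_MAX, granularity) bytes.  Only allocated ranges are
 * marked dirty, so e.g. sync=top copies just the data that differs from
 * the base.
 */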
| 884 | |
Paolo Bonzini | bdffb31 | 2016-11-09 17:20:08 +0100 | [diff] [blame] | 885 | /* Called when going out of the streaming phase to flush the bulk of the |
| 886 | * data to the medium, or just before completing. |
| 887 | */ |
| 888 | static int mirror_flush(MirrorBlockJob *s) |
| 889 | { |
| 890 | int ret = blk_flush(s->target); |
| 891 | if (ret < 0) { |
| 892 | if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) { |
| 893 | s->ret = ret; |
| 894 | } |
| 895 | } |
| 896 | return ret; |
| 897 | } |
| 898 | |
John Snow | f67432a | 2018-08-29 21:57:26 -0400 | [diff] [blame] | 899 | static int coroutine_fn mirror_run(Job *job, Error **errp) |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 900 | { |
John Snow | f67432a | 2018-08-29 21:57:26 -0400 | [diff] [blame] | 901 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); |
Max Reitz | 138f9ff | 2018-06-13 20:18:14 +0200 | [diff] [blame] | 902 | BlockDriverState *bs = s->mirror_top_bs->backing->bs; |
Kevin Wolf | e253f4b | 2016-04-12 16:17:41 +0200 | [diff] [blame] | 903 | BlockDriverState *target_bs = blk_bs(s->target); |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 904 | bool need_drain = true; |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 905 | int64_t length; |
Kevin Wolf | e83dd68 | 2020-05-11 15:58:24 +0200 | [diff] [blame] | 906 | int64_t target_length; |
Paolo Bonzini | b812f67 | 2013-01-21 17:09:43 +0100 | [diff] [blame] | 907 | BlockDriverInfo bdi; |
Jeff Cody | 1d33936 | 2015-01-22 08:03:29 -0500 | [diff] [blame] | 908 | char backing_filename[2]; /* we only need 2 characters because we are only |
|  909 |                                     checking for an empty string */ |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 910 | int ret = 0; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 911 | |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 912 | if (job_is_cancelled(&s->common.job)) { |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 913 | goto immediate_exit; |
| 914 | } |
| 915 | |
Max Reitz | b21c765 | 2014-10-24 15:57:36 +0200 | [diff] [blame] | 916 | s->bdev_length = bdrv_getlength(bs); |
| 917 | if (s->bdev_length < 0) { |
| 918 | ret = s->bdev_length; |
Fam Zheng | 373df5b | 2014-04-29 18:09:09 +0800 | [diff] [blame] | 919 | goto immediate_exit; |
Kevin Wolf | becc347 | 2017-02-17 11:11:28 +0100 | [diff] [blame] | 920 | } |
| 921 | |
Kevin Wolf | e83dd68 | 2020-05-11 15:58:24 +0200 | [diff] [blame] | 922 | target_length = blk_getlength(s->target); |
| 923 | if (target_length < 0) { |
| 924 | ret = target_length; |
| 925 | goto immediate_exit; |
| 926 | } |
| 927 | |
Kevin Wolf | becc347 | 2017-02-17 11:11:28 +0100 | [diff] [blame] | 928 | /* Active commit must resize the base image if its size differs from the |
| 929 | * active layer. */ |
| 930 | if (s->base == blk_bs(s->target)) { |
Kevin Wolf | e83dd68 | 2020-05-11 15:58:24 +0200 | [diff] [blame] | 931 | if (s->bdev_length > target_length) { |
Max Reitz | c80d8b0 | 2019-09-18 11:51:40 +0200 | [diff] [blame] | 932 | ret = blk_truncate(s->target, s->bdev_length, false, |
Kevin Wolf | 8c6242b | 2020-04-24 14:54:41 +0200 | [diff] [blame] | 933 | PREALLOC_MODE_OFF, 0, NULL); |
Kevin Wolf | becc347 | 2017-02-17 11:11:28 +0100 | [diff] [blame] | 934 | if (ret < 0) { |
| 935 | goto immediate_exit; |
| 936 | } |
| 937 | } |
Kevin Wolf | e83dd68 | 2020-05-11 15:58:24 +0200 | [diff] [blame] | 938 | } else if (s->bdev_length != target_length) { |
| 939 | error_setg(errp, "Source and target image have different sizes"); |
| 940 | ret = -EINVAL; |
| 941 | goto immediate_exit; |
Kevin Wolf | becc347 | 2017-02-17 11:11:28 +0100 | [diff] [blame] | 942 | } |
| 943 | |
| 944 | if (s->bdev_length == 0) { |
Kevin Wolf | 2e1795b | 2018-04-25 14:56:09 +0200 | [diff] [blame] |  945 |         /* Transition to the READY state and wait for completion. */ |
| 946 | job_transition_to_ready(&s->common.job); |
Fam Zheng | 9e48b02 | 2014-06-24 20:26:36 +0800 | [diff] [blame] | 947 | s->synced = true; |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 948 | s->actively_synced = true; |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 949 | while (!job_is_cancelled(&s->common.job) && !s->should_complete) { |
Kevin Wolf | 198c49c | 2018-04-24 16:55:04 +0200 | [diff] [blame] | 950 | job_yield(&s->common.job); |
Fam Zheng | 9e48b02 | 2014-06-24 20:26:36 +0800 | [diff] [blame] | 951 | } |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 952 | s->common.job.cancelled = false; |
Fam Zheng | 9e48b02 | 2014-06-24 20:26:36 +0800 | [diff] [blame] | 953 | goto immediate_exit; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 954 | } |
| 955 | |
Max Reitz | b21c765 | 2014-10-24 15:57:36 +0200 | [diff] [blame] | 956 | length = DIV_ROUND_UP(s->bdev_length, s->granularity); |
Paolo Bonzini | 402a474 | 2013-01-22 09:03:14 +0100 | [diff] [blame] | 957 | s->in_flight_bitmap = bitmap_new(length); |
| 958 | |
Paolo Bonzini | b812f67 | 2013-01-21 17:09:43 +0100 | [diff] [blame] | 959 | /* If we have no backing file yet in the destination, we cannot let |
| 960 | * the destination do COW. Instead, we copy sectors around the |
| 961 | * dirty data if needed. We need a bitmap to do that. |
| 962 | */ |
Kevin Wolf | e253f4b | 2016-04-12 16:17:41 +0200 | [diff] [blame] | 963 | bdrv_get_backing_filename(target_bs, backing_filename, |
Paolo Bonzini | b812f67 | 2013-01-21 17:09:43 +0100 | [diff] [blame] | 964 | sizeof(backing_filename)); |
Kevin Wolf | e253f4b | 2016-04-12 16:17:41 +0200 | [diff] [blame] | 965 | if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) { |
Eric Blake | b436982 | 2017-07-07 07:44:46 -0500 | [diff] [blame] | 966 | s->target_cluster_size = bdi.cluster_size; |
| 967 | } else { |
| 968 | s->target_cluster_size = BDRV_SECTOR_SIZE; |
Paolo Bonzini | b812f67 | 2013-01-21 17:09:43 +0100 | [diff] [blame] | 969 | } |
Max Reitz | 3f072a7 | 2019-06-12 16:27:32 +0200 | [diff] [blame] | 970 | if (backing_filename[0] && !bdrv_backing_chain_next(target_bs) && |
Eric Blake | b436982 | 2017-07-07 07:44:46 -0500 | [diff] [blame] | 971 | s->granularity < s->target_cluster_size) { |
| 972 | s->buf_size = MAX(s->buf_size, s->target_cluster_size); |
Fam Zheng | e5b4357 | 2016-02-05 10:00:29 +0800 | [diff] [blame] | 973 | s->cow_bitmap = bitmap_new(length); |
| 974 | } |
Kevin Wolf | e253f4b | 2016-04-12 16:17:41 +0200 | [diff] [blame] | 975 | s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov); |
Paolo Bonzini | b812f67 | 2013-01-21 17:09:43 +0100 | [diff] [blame] | 976 | |
Kevin Wolf | 7504edf | 2014-05-21 18:16:21 +0200 | [diff] [blame] | 977 | s->buf = qemu_try_blockalign(bs, s->buf_size); |
| 978 | if (s->buf == NULL) { |
| 979 | ret = -ENOMEM; |
| 980 | goto immediate_exit; |
| 981 | } |
| 982 | |
Paolo Bonzini | 402a474 | 2013-01-22 09:03:14 +0100 | [diff] [blame] | 983 | mirror_free_init(s); |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 984 | |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 985 | s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
Fam Zheng | 03544a6 | 2013-12-16 14:45:30 +0800 | [diff] [blame] | 986 | if (!s->is_none_mode) { |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 987 | ret = mirror_dirty_init(s); |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 988 | if (ret < 0 || job_is_cancelled(&s->common.job)) { |
Denis V. Lunev | c0b363a | 2016-07-14 16:33:25 +0300 | [diff] [blame] | 989 | goto immediate_exit; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 990 | } |
| 991 | } |
| 992 | |
Fam Zheng | dc162c8 | 2016-10-13 17:58:21 -0400 | [diff] [blame] | 993 | assert(!s->dbi); |
Eric Blake | 715a74d | 2017-09-25 09:55:16 -0500 | [diff] [blame] | 994 | s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap); |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 995 | for (;;) { |
Paolo Bonzini | cc8c9d6 | 2014-03-21 13:55:18 +0100 | [diff] [blame] | 996 | uint64_t delay_ns = 0; |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 997 | int64_t cnt, delta; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 998 | bool should_complete; |
| 999 | |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1000 | /* Do not start passive operations while there are active |
| 1001 | * writes in progress */ |
| 1002 | while (s->in_active_write_counter) { |
Kevin Wolf | 9178f4f | 2020-03-26 16:36:27 +0100 | [diff] [blame] | 1003 | mirror_wait_for_any_operation(s, true); |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1004 | } |
| 1005 | |
Paolo Bonzini | bd48bde | 2013-01-22 09:03:12 +0100 | [diff] [blame] | 1006 | if (s->ret < 0) { |
| 1007 | ret = s->ret; |
| 1008 | goto immediate_exit; |
| 1009 | } |
| 1010 | |
Kevin Wolf | da01ff7 | 2018-04-13 17:31:02 +0200 | [diff] [blame] | 1011 | job_pause_point(&s->common.job); |
Stefan Hajnoczi | 565ac01 | 2016-06-16 17:56:28 +0100 | [diff] [blame] | 1012 | |
John Snow | 20dca81 | 2015-04-17 19:50:02 -0400 | [diff] [blame] | 1013 | cnt = bdrv_get_dirty_count(s->dirty_bitmap); |
Kevin Wolf | 05df8a6 | 2018-01-18 18:08:22 +0100 | [diff] [blame] | 1014 | /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is |
| 1015 | * the number of bytes currently being processed; together those are |
| 1016 | * the current remaining operation length */ |
Kevin Wolf | 30a5c88 | 2018-05-04 12:17:20 +0200 | [diff] [blame] | 1017 | job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt); |
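        /* For example (illustrative numbers), with cnt == 3 MiB of dirty
         * bytes and s->bytes_in_flight == 1 MiB queued in mirror ops, the
         * job reports 4 MiB of remaining work. */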
Paolo Bonzini | bd48bde | 2013-01-22 09:03:12 +0100 | [diff] [blame] | 1018 | |
| 1019 | /* Note that even when no rate limit is applied we need to yield |
Fam Zheng | a728233 | 2015-04-03 22:05:21 +0800 | [diff] [blame] | 1020 | * periodically with no pending I/O so that bdrv_drain_all() returns. |
Kevin Wolf | 18bb692 | 2018-01-18 20:25:40 +0100 | [diff] [blame] | 1021 |          * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is |
| 1022 | * an error, or when the source is clean, whichever comes first. */ |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 1023 | delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns; |
Kevin Wolf | 18bb692 | 2018-01-18 20:25:40 +0100 | [diff] [blame] | 1024 | if (delta < BLOCK_JOB_SLICE_TIME && |
Paolo Bonzini | bd48bde | 2013-01-22 09:03:12 +0100 | [diff] [blame] | 1025 | s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) { |
Denis V. Lunev | cf56a3c | 2016-06-22 15:35:27 +0300 | [diff] [blame] | 1026 | if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 || |
Paolo Bonzini | 402a474 | 2013-01-22 09:03:14 +0100 | [diff] [blame] | 1027 | (cnt == 0 && s->in_flight > 0)) { |
Eric Blake | 9a46dba | 2017-09-25 09:55:18 -0500 | [diff] [blame] | 1028 | trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight); |
Kevin Wolf | 9178f4f | 2020-03-26 16:36:27 +0100 | [diff] [blame] | 1029 | mirror_wait_for_free_in_flight_slot(s); |
Paolo Bonzini | bd48bde | 2013-01-22 09:03:12 +0100 | [diff] [blame] | 1030 | continue; |
| 1031 | } else if (cnt != 0) { |
Paolo Bonzini | cc8c9d6 | 2014-03-21 13:55:18 +0100 | [diff] [blame] | 1032 | delay_ns = mirror_iteration(s); |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1033 | } |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1034 | } |
| 1035 | |
| 1036 | should_complete = false; |
Paolo Bonzini | bd48bde | 2013-01-22 09:03:12 +0100 | [diff] [blame] | 1037 | if (s->in_flight == 0 && cnt == 0) { |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1038 | trace_mirror_before_flush(s); |
Paolo Bonzini | bdffb31 | 2016-11-09 17:20:08 +0100 | [diff] [blame] | 1039 | if (!s->synced) { |
| 1040 | if (mirror_flush(s) < 0) { |
| 1041 | /* Go check s->ret. */ |
| 1042 | continue; |
Paolo Bonzini | b952b55 | 2012-10-18 16:49:28 +0200 | [diff] [blame] | 1043 | } |
Paolo Bonzini | b952b55 | 2012-10-18 16:49:28 +0200 | [diff] [blame] | 1044 | /* We're out of the streaming phase. From now on, if the job |
| 1045 | * is cancelled we will actually complete all pending I/O and |
| 1046 | * report completion. This way, block-job-cancel will leave |
| 1047 | * the target in a consistent state. |
| 1048 | */ |
Kevin Wolf | 2e1795b | 2018-04-25 14:56:09 +0200 | [diff] [blame] | 1049 | job_transition_to_ready(&s->common.job); |
Paolo Bonzini | bdffb31 | 2016-11-09 17:20:08 +0100 | [diff] [blame] | 1050 | s->synced = true; |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1051 | if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) { |
| 1052 | s->actively_synced = true; |
| 1053 | } |
Paolo Bonzini | d63ffd8 | 2012-10-18 16:49:25 +0200 | [diff] [blame] | 1054 | } |
Paolo Bonzini | bdffb31 | 2016-11-09 17:20:08 +0100 | [diff] [blame] | 1055 | |
| 1056 | should_complete = s->should_complete || |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 1057 | job_is_cancelled(&s->common.job); |
Paolo Bonzini | bdffb31 | 2016-11-09 17:20:08 +0100 | [diff] [blame] | 1058 | cnt = bdrv_get_dirty_count(s->dirty_bitmap); |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1059 | } |
| 1060 | |
| 1061 | if (cnt == 0 && should_complete) { |
| 1062 | /* The dirty bitmap is not updated while operations are pending. |
| 1063 | * If we're about to exit, wait for pending operations before |
| 1064 | * calling bdrv_get_dirty_count(bs), or we may exit while the |
| 1065 | * source has dirty data to copy! |
| 1066 | * |
| 1067 | * Note that I/O can be submitted by the guest while |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1068 |              * the mirror job runs, so pause it now.  Before deciding |
| 1069 | * whether to switch to target check one last time if I/O has |
| 1070 | * come in the meanwhile, and if not flush the data to disk. |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1071 | */ |
Eric Blake | 9a46dba | 2017-09-25 09:55:18 -0500 | [diff] [blame] | 1072 | trace_mirror_before_drain(s, cnt); |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1073 | |
Sergio Lopez | 5e77175 | 2019-03-08 16:48:53 +0100 | [diff] [blame] | 1074 | s->in_drain = true; |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1075 | bdrv_drained_begin(bs); |
John Snow | 20dca81 | 2015-04-17 19:50:02 -0400 | [diff] [blame] | 1076 | cnt = bdrv_get_dirty_count(s->dirty_bitmap); |
Paolo Bonzini | bdffb31 | 2016-11-09 17:20:08 +0100 | [diff] [blame] | 1077 | if (cnt > 0 || mirror_flush(s) < 0) { |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1078 | bdrv_drained_end(bs); |
Sergio Lopez | 5e77175 | 2019-03-08 16:48:53 +0100 | [diff] [blame] | 1079 | s->in_drain = false; |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1080 | continue; |
| 1081 | } |
| 1082 | |
| 1083 | /* The two disks are in sync. Exit and report successful |
| 1084 | * completion. |
| 1085 | */ |
| 1086 | assert(QLIST_EMPTY(&bs->tracked_requests)); |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 1087 | s->common.job.cancelled = false; |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1088 | need_drain = false; |
| 1089 | break; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1090 | } |
| 1091 | |
| 1092 | ret = 0; |
Stefan Hajnoczi | ddc4115 | 2018-04-24 13:35:27 +0100 | [diff] [blame] | 1093 | |
| 1094 | if (s->synced && !should_complete) { |
Kevin Wolf | 18bb692 | 2018-01-18 20:25:40 +0100 | [diff] [blame] | 1095 | delay_ns = (s->in_flight == 0 && |
| 1096 | cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0); |
Stefan Hajnoczi | ddc4115 | 2018-04-24 13:35:27 +0100 | [diff] [blame] | 1097 | } |
Eric Blake | 9a46dba | 2017-09-25 09:55:18 -0500 | [diff] [blame] | 1098 | trace_mirror_before_sleep(s, cnt, s->synced, delay_ns); |
Kevin Wolf | 5d43e86 | 2018-04-18 16:32:20 +0200 | [diff] [blame] | 1099 | job_sleep_ns(&s->common.job, delay_ns); |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 1100 | if (job_is_cancelled(&s->common.job) && |
Kevin Wolf | 004e95d | 2018-04-20 14:56:08 +0200 | [diff] [blame] | 1101 | (!s->synced || s->common.job.force_cancel)) |
Max Reitz | eb36639 | 2018-05-02 00:05:08 +0200 | [diff] [blame] | 1102 | { |
Liang Li | b76e445 | 2018-03-13 08:12:16 -0400 | [diff] [blame] | 1103 | break; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1104 | } |
Denis V. Lunev | 49efb1f | 2016-07-14 16:33:24 +0300 | [diff] [blame] | 1105 | s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME); |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1106 | } |
| 1107 | |
| 1108 | immediate_exit: |
Paolo Bonzini | bd48bde | 2013-01-22 09:03:12 +0100 | [diff] [blame] | 1109 | if (s->in_flight > 0) { |
| 1110 | /* We get here only if something went wrong. Either the job failed, |
| 1111 | * or it was cancelled prematurely so that we do not guarantee that |
| 1112 | * the target is a copy of the source. |
| 1113 | */ |
Kevin Wolf | 004e95d | 2018-04-20 14:56:08 +0200 | [diff] [blame] | 1114 | assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) && |
Kevin Wolf | daa7f2f | 2018-04-17 12:56:07 +0200 | [diff] [blame] | 1115 | job_is_cancelled(&s->common.job))); |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1116 | assert(need_drain); |
Paolo Bonzini | bae8196 | 2016-10-27 12:48:50 +0200 | [diff] [blame] | 1117 | mirror_wait_for_all_io(s); |
Paolo Bonzini | bd48bde | 2013-01-22 09:03:12 +0100 | [diff] [blame] | 1118 | } |
| 1119 | |
| 1120 | assert(s->in_flight == 0); |
Markus Armbruster | 7191bf3 | 2013-01-15 15:29:10 +0100 | [diff] [blame] | 1121 | qemu_vfree(s->buf); |
Paolo Bonzini | b812f67 | 2013-01-21 17:09:43 +0100 | [diff] [blame] | 1122 | g_free(s->cow_bitmap); |
Paolo Bonzini | 402a474 | 2013-01-22 09:03:14 +0100 | [diff] [blame] | 1123 | g_free(s->in_flight_bitmap); |
Fam Zheng | dc162c8 | 2016-10-13 17:58:21 -0400 | [diff] [blame] | 1124 | bdrv_dirty_iter_free(s->dbi); |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 1125 | |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1126 | if (need_drain) { |
Sergio Lopez | 5e77175 | 2019-03-08 16:48:53 +0100 | [diff] [blame] | 1127 | s->in_drain = true; |
Paolo Bonzini | 9a0cec6 | 2016-10-27 12:48:51 +0200 | [diff] [blame] | 1128 | bdrv_drained_begin(bs); |
| 1129 | } |
John Snow | f67432a | 2018-08-29 21:57:26 -0400 | [diff] [blame] | 1130 | |
John Snow | f67432a | 2018-08-29 21:57:26 -0400 | [diff] [blame] | 1131 | return ret; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1132 | } |
| 1133 | |
Kevin Wolf | 3453d97 | 2018-04-23 12:24:16 +0200 | [diff] [blame] | 1134 | static void mirror_complete(Job *job, Error **errp) |
Paolo Bonzini | d63ffd8 | 2012-10-18 16:49:25 +0200 | [diff] [blame] | 1135 | { |
Kevin Wolf | 3453d97 | 2018-04-23 12:24:16 +0200 | [diff] [blame] | 1136 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); |
Max Reitz | 274fcce | 2016-06-10 20:57:47 +0200 | [diff] [blame] | 1137 | |
Paolo Bonzini | d63ffd8 | 2012-10-18 16:49:25 +0200 | [diff] [blame] | 1138 | if (!s->synced) { |
Alberto Garcia | 9df229c | 2016-07-05 17:28:53 +0300 | [diff] [blame] | 1139 | error_setg(errp, "The active block job '%s' cannot be completed", |
Kevin Wolf | 3453d97 | 2018-04-23 12:24:16 +0200 | [diff] [blame] | 1140 | job->id); |
Paolo Bonzini | d63ffd8 | 2012-10-18 16:49:25 +0200 | [diff] [blame] | 1141 | return; |
| 1142 | } |
| 1143 | |
Changlong Xie | 15d6729 | 2016-06-23 16:57:21 +0800 | [diff] [blame] | 1144 | /* block all operations on to_replace bs */ |
BenoƮt Canet | 09158f0 | 2014-06-27 18:25:25 +0200 | [diff] [blame] | 1145 | if (s->replaces) { |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 1146 | AioContext *replace_aio_context; |
| 1147 | |
Wen Congyang | e12f378 | 2015-07-17 10:12:22 +0800 | [diff] [blame] | 1148 | s->to_replace = bdrv_find_node(s->replaces); |
BenoƮt Canet | 09158f0 | 2014-06-27 18:25:25 +0200 | [diff] [blame] | 1149 | if (!s->to_replace) { |
Wen Congyang | e12f378 | 2015-07-17 10:12:22 +0800 | [diff] [blame] | 1150 | error_setg(errp, "Node name '%s' not found", s->replaces); |
BenoƮt Canet | 09158f0 | 2014-06-27 18:25:25 +0200 | [diff] [blame] | 1151 | return; |
| 1152 | } |
| 1153 | |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 1154 | replace_aio_context = bdrv_get_aio_context(s->to_replace); |
| 1155 | aio_context_acquire(replace_aio_context); |
| 1156 | |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1157 | /* TODO Translate this into permission system. Current definition of |
| 1158 | * GRAPH_MOD would require to request it for the parents; they might |
| 1159 | * not even be BlockDriverStates, however, so a BdrvChild can't address |
| 1160 | * them. May need redefinition of GRAPH_MOD. */ |
BenoƮt Canet | 09158f0 | 2014-06-27 18:25:25 +0200 | [diff] [blame] | 1161 | error_setg(&s->replace_blocker, |
| 1162 | "block device is in use by block-job-complete"); |
| 1163 | bdrv_op_block_all(s->to_replace, s->replace_blocker); |
| 1164 | bdrv_ref(s->to_replace); |
Stefan Hajnoczi | 5a7e7a0 | 2014-10-21 12:03:58 +0100 | [diff] [blame] | 1165 | |
| 1166 | aio_context_release(replace_aio_context); |
BenoƮt Canet | 09158f0 | 2014-06-27 18:25:25 +0200 | [diff] [blame] | 1167 | } |
| 1168 | |
Paolo Bonzini | d63ffd8 | 2012-10-18 16:49:25 +0200 | [diff] [blame] | 1169 | s->should_complete = true; |
Max Reitz | 0076941 | 2021-04-09 14:04:19 +0200 | [diff] [blame] | 1170 | |
| 1171 | /* If the job is paused, it will be re-entered when it is resumed */ |
| 1172 | if (!job->paused) { |
| 1173 | job_enter(job); |
| 1174 | } |
Paolo Bonzini | d63ffd8 | 2012-10-18 16:49:25 +0200 | [diff] [blame] | 1175 | } |
| 1176 | |
Stefan Hajnoczi | 537c3d4 | 2018-12-13 11:24:34 +0000 | [diff] [blame] | 1177 | static void coroutine_fn mirror_pause(Job *job) |
Stefan Hajnoczi | 565ac01 | 2016-06-16 17:56:28 +0100 | [diff] [blame] | 1178 | { |
Kevin Wolf | da01ff7 | 2018-04-13 17:31:02 +0200 | [diff] [blame] | 1179 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); |
Stefan Hajnoczi | 565ac01 | 2016-06-16 17:56:28 +0100 | [diff] [blame] | 1180 | |
Paolo Bonzini | bae8196 | 2016-10-27 12:48:50 +0200 | [diff] [blame] | 1181 | mirror_wait_for_all_io(s); |
Stefan Hajnoczi | 565ac01 | 2016-06-16 17:56:28 +0100 | [diff] [blame] | 1182 | } |
| 1183 | |
Kevin Wolf | 89bd030 | 2018-03-22 14:11:20 +0100 | [diff] [blame] | 1184 | static bool mirror_drained_poll(BlockJob *job) |
| 1185 | { |
| 1186 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common); |
Sergio Lopez | 5e77175 | 2019-03-08 16:48:53 +0100 | [diff] [blame] | 1187 | |
| 1188 |     /* If the job is neither paused nor cancelled, we can't be sure that it won't |
| 1189 | * issue more requests. We make an exception if we've reached this point |
| 1190 | * from one of our own drain sections, to avoid a deadlock waiting for |
| 1191 | * ourselves. |
| 1192 | */ |
| 1193 | if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) { |
| 1194 | return true; |
| 1195 | } |
| 1196 | |
Kevin Wolf | 89bd030 | 2018-03-22 14:11:20 +0100 | [diff] [blame] | 1197 | return !!s->in_flight; |
| 1198 | } |
| 1199 | |
Vladimir Sementsov-Ogievskiy | 9c785cd | 2021-04-21 10:58:58 +0300 | [diff] [blame] | 1200 | static void mirror_cancel(Job *job, bool force) |
Vladimir Sementsov-Ogievskiy | 521ff8b | 2021-02-05 19:37:15 +0300 | [diff] [blame] | 1201 | { |
| 1202 | MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job); |
| 1203 | BlockDriverState *target = blk_bs(s->target); |
| 1204 | |
Vladimir Sementsov-Ogievskiy | 9c785cd | 2021-04-21 10:58:58 +0300 | [diff] [blame] | 1205 | if (force || !job_is_ready(job)) { |
| 1206 | bdrv_cancel_in_flight(target); |
| 1207 | } |
Vladimir Sementsov-Ogievskiy | 521ff8b | 2021-02-05 19:37:15 +0300 | [diff] [blame] | 1208 | } |
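/*
 * Note: for a force-cancel (or a cancel before the job is READY), in-flight
 * requests to the target are abandoned via bdrv_cancel_in_flight().  A
 * non-force cancel of a READY job instead behaves like completion: pending
 * target I/O is allowed to finish so the target stays consistent, as the
 * comments in mirror_run() describe.
 */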
| 1209 | |
Fam Zheng | 3fc4b10 | 2013-10-08 17:29:38 +0800 | [diff] [blame] | 1210 | static const BlockJobDriver mirror_job_driver = { |
Kevin Wolf | 33e9e9b | 2018-04-12 17:29:59 +0200 | [diff] [blame] | 1211 | .job_driver = { |
| 1212 | .instance_size = sizeof(MirrorBlockJob), |
Kevin Wolf | 252291e | 2018-04-12 17:57:08 +0200 | [diff] [blame] | 1213 | .job_type = JOB_TYPE_MIRROR, |
Kevin Wolf | 80fa2c7 | 2018-04-13 18:50:05 +0200 | [diff] [blame] | 1214 | .free = block_job_free, |
Kevin Wolf | b15de82 | 2018-04-18 17:10:26 +0200 | [diff] [blame] | 1215 | .user_resume = block_job_user_resume, |
John Snow | f67432a | 2018-08-29 21:57:26 -0400 | [diff] [blame] | 1216 | .run = mirror_run, |
John Snow | 737efc1 | 2018-09-06 09:02:15 -0400 | [diff] [blame] | 1217 | .prepare = mirror_prepare, |
| 1218 | .abort = mirror_abort, |
Kevin Wolf | da01ff7 | 2018-04-13 17:31:02 +0200 | [diff] [blame] | 1219 | .pause = mirror_pause, |
Kevin Wolf | 3453d97 | 2018-04-23 12:24:16 +0200 | [diff] [blame] | 1220 | .complete = mirror_complete, |
Vladimir Sementsov-Ogievskiy | 521ff8b | 2021-02-05 19:37:15 +0300 | [diff] [blame] | 1221 | .cancel = mirror_cancel, |
Kevin Wolf | 33e9e9b | 2018-04-12 17:29:59 +0200 | [diff] [blame] | 1222 | }, |
Kevin Wolf | 89bd030 | 2018-03-22 14:11:20 +0100 | [diff] [blame] | 1223 | .drained_poll = mirror_drained_poll, |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1224 | }; |
| 1225 | |
Fam Zheng | 03544a6 | 2013-12-16 14:45:30 +0800 | [diff] [blame] | 1226 | static const BlockJobDriver commit_active_job_driver = { |
Kevin Wolf | 33e9e9b | 2018-04-12 17:29:59 +0200 | [diff] [blame] | 1227 | .job_driver = { |
| 1228 | .instance_size = sizeof(MirrorBlockJob), |
Kevin Wolf | 252291e | 2018-04-12 17:57:08 +0200 | [diff] [blame] | 1229 | .job_type = JOB_TYPE_COMMIT, |
Kevin Wolf | 80fa2c7 | 2018-04-13 18:50:05 +0200 | [diff] [blame] | 1230 | .free = block_job_free, |
Kevin Wolf | b15de82 | 2018-04-18 17:10:26 +0200 | [diff] [blame] | 1231 | .user_resume = block_job_user_resume, |
John Snow | f67432a | 2018-08-29 21:57:26 -0400 | [diff] [blame] | 1232 | .run = mirror_run, |
John Snow | 737efc1 | 2018-09-06 09:02:15 -0400 | [diff] [blame] | 1233 | .prepare = mirror_prepare, |
| 1234 | .abort = mirror_abort, |
Kevin Wolf | da01ff7 | 2018-04-13 17:31:02 +0200 | [diff] [blame] | 1235 | .pause = mirror_pause, |
Kevin Wolf | 3453d97 | 2018-04-23 12:24:16 +0200 | [diff] [blame] | 1236 | .complete = mirror_complete, |
Kevin Wolf | 33e9e9b | 2018-04-12 17:29:59 +0200 | [diff] [blame] | 1237 | }, |
Kevin Wolf | 89bd030 | 2018-03-22 14:11:20 +0100 | [diff] [blame] | 1238 | .drained_poll = mirror_drained_poll, |
Fam Zheng | 03544a6 | 2013-12-16 14:45:30 +0800 | [diff] [blame] | 1239 | }; |
| 1240 | |
Stefan Hajnoczi | 537c3d4 | 2018-12-13 11:24:34 +0000 | [diff] [blame] | 1241 | static void coroutine_fn |
| 1242 | do_sync_target_write(MirrorBlockJob *job, MirrorMethod method, |
| 1243 | uint64_t offset, uint64_t bytes, |
| 1244 | QEMUIOVector *qiov, int flags) |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1245 | { |
Vladimir Sementsov-Ogievskiy | 5c511ac | 2019-10-11 12:07:08 +0300 | [diff] [blame] | 1246 | int ret; |
Vladimir Sementsov-Ogievskiy | dbdf699 | 2019-10-11 12:07:10 +0300 | [diff] [blame] | 1247 | size_t qiov_offset = 0; |
| 1248 | int64_t bitmap_offset, bitmap_end; |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1249 | |
Vladimir Sementsov-Ogievskiy | dbdf699 | 2019-10-11 12:07:10 +0300 | [diff] [blame] | 1250 | if (!QEMU_IS_ALIGNED(offset, job->granularity) && |
| 1251 | bdrv_dirty_bitmap_get(job->dirty_bitmap, offset)) |
| 1252 | { |
| 1253 | /* |
| 1254 | * Dirty unaligned padding: ignore it. |
| 1255 | * |
| 1256 | * Reasoning: |
| 1257 |          * 1. If we copy it, we can't reset the corresponding bit in |
| 1258 |          *    dirty_bitmap, as there may be some "dirty" bytes still not |
| 1259 |          *    copied. |
| 1260 |          * 2. It's already dirty, so by skipping it we don't diverge the |
| 1261 |          *    mirror's progress. |
| 1262 |          * |
| 1263 |          * Note that because of this, a guest write may make no contribution |
| 1264 |          * to mirror convergence, but that's acceptable, as we have a |
| 1265 |          * background mirroring process.  If, under some bad circumstances |
| 1266 |          * (high guest I/O load), the background process starves, we will |
| 1267 |          * not converge anyway, even if every write contributes, as the |
| 1268 |          * guest is not guaranteed to rewrite the whole disk. |
| 1269 | */ |
| 1270 | qiov_offset = QEMU_ALIGN_UP(offset, job->granularity) - offset; |
| 1271 | if (bytes <= qiov_offset) { |
| 1272 | /* nothing to do after shrink */ |
| 1273 | return; |
| 1274 | } |
| 1275 | offset += qiov_offset; |
| 1276 | bytes -= qiov_offset; |
| 1277 | } |
| 1278 | |
| 1279 | if (!QEMU_IS_ALIGNED(offset + bytes, job->granularity) && |
| 1280 | bdrv_dirty_bitmap_get(job->dirty_bitmap, offset + bytes - 1)) |
| 1281 | { |
| 1282 | uint64_t tail = (offset + bytes) % job->granularity; |
| 1283 | |
| 1284 | if (bytes <= tail) { |
| 1285 | /* nothing to do after shrink */ |
| 1286 | return; |
| 1287 | } |
| 1288 | bytes -= tail; |
| 1289 | } |
| 1290 | |
| 1291 | /* |
| 1292 | * Tails are either clean or shrunk, so for bitmap resetting |
| 1293 |      * we can safely align the range down. |
| 1294 | */ |
| 1295 | bitmap_offset = QEMU_ALIGN_UP(offset, job->granularity); |
| 1296 | bitmap_end = QEMU_ALIGN_DOWN(offset + bytes, job->granularity); |
| 1297 | if (bitmap_offset < bitmap_end) { |
| 1298 | bdrv_reset_dirty_bitmap(job->dirty_bitmap, bitmap_offset, |
| 1299 | bitmap_end - bitmap_offset); |
| 1300 | } |
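    /*
     * Worked example (illustrative numbers): job->granularity = 64 KiB
     * (0x10000), a guest write at offset = 0x18000 with bytes = 0x30000,
     * and both the head and tail chunks already dirty:
     *
     *     head:  qiov_offset = 0x20000 - 0x18000 = 0x8000
     *            -> offset = 0x20000, bytes = 0x28000
     *     tail:  tail = 0x48000 % 0x10000 = 0x8000
     *            -> bytes = 0x20000
     *     reset: bitmap_offset = 0x20000, bitmap_end = 0x40000
     *            -> chunks 2 and 3 are cleared; chunks 1 and 4 stay dirty
     *               and are left to the background copy.
     */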
Vladimir Sementsov-Ogievskiy | 5c511ac | 2019-10-11 12:07:08 +0300 | [diff] [blame] | 1301 | |
| 1302 | job_progress_increase_remaining(&job->common.job, bytes); |
| 1303 | |
| 1304 | switch (method) { |
| 1305 | case MIRROR_METHOD_COPY: |
Vladimir Sementsov-Ogievskiy | dbdf699 | 2019-10-11 12:07:10 +0300 | [diff] [blame] | 1306 | ret = blk_co_pwritev_part(job->target, offset, bytes, |
| 1307 | qiov, qiov_offset, flags); |
Vladimir Sementsov-Ogievskiy | 5c511ac | 2019-10-11 12:07:08 +0300 | [diff] [blame] | 1308 | break; |
| 1309 | |
| 1310 | case MIRROR_METHOD_ZERO: |
| 1311 | assert(!qiov); |
| 1312 | ret = blk_co_pwrite_zeroes(job->target, offset, bytes, flags); |
| 1313 | break; |
| 1314 | |
| 1315 | case MIRROR_METHOD_DISCARD: |
| 1316 | assert(!qiov); |
| 1317 | ret = blk_co_pdiscard(job->target, offset, bytes); |
| 1318 | break; |
| 1319 | |
| 1320 | default: |
| 1321 | abort(); |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1322 | } |
| 1323 | |
Vladimir Sementsov-Ogievskiy | 5c511ac | 2019-10-11 12:07:08 +0300 | [diff] [blame] | 1324 | if (ret >= 0) { |
| 1325 | job_progress_update(&job->common.job, bytes); |
| 1326 | } else { |
| 1327 | BlockErrorAction action; |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1328 | |
Vladimir Sementsov-Ogievskiy | dbdf699 | 2019-10-11 12:07:10 +0300 | [diff] [blame] | 1329 | /* |
| 1330 |          * We failed, so we should mark the whole area dirty, aligned up. |
| 1331 |          * Note that we don't care about any shrunk tails: they were dirty |
| 1332 |          * at function start, and they must still be dirty, as we've locked |
| 1333 |          * the region for the in-flight op. |
| 1334 | */ |
| 1335 | bitmap_offset = QEMU_ALIGN_DOWN(offset, job->granularity); |
| 1336 | bitmap_end = QEMU_ALIGN_UP(offset + bytes, job->granularity); |
| 1337 | bdrv_set_dirty_bitmap(job->dirty_bitmap, bitmap_offset, |
| 1338 | bitmap_end - bitmap_offset); |
Vladimir Sementsov-Ogievskiy | 5c511ac | 2019-10-11 12:07:08 +0300 | [diff] [blame] | 1339 | job->actively_synced = false; |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1340 | |
Vladimir Sementsov-Ogievskiy | 5c511ac | 2019-10-11 12:07:08 +0300 | [diff] [blame] | 1341 | action = mirror_error_action(job, false, -ret); |
| 1342 | if (action == BLOCK_ERROR_ACTION_REPORT) { |
| 1343 | if (!job->ret) { |
| 1344 | job->ret = ret; |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1345 | } |
| 1346 | } |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1347 | } |
| 1348 | } |
| 1349 | |
| 1350 | static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s, |
| 1351 | uint64_t offset, |
| 1352 | uint64_t bytes) |
| 1353 | { |
| 1354 | MirrorOp *op; |
| 1355 | uint64_t start_chunk = offset / s->granularity; |
| 1356 | uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity); |
| 1357 | |
| 1358 | op = g_new(MirrorOp, 1); |
| 1359 | *op = (MirrorOp){ |
| 1360 | .s = s, |
| 1361 | .offset = offset, |
| 1362 | .bytes = bytes, |
| 1363 | .is_active_write = true, |
Kevin Wolf | ce8cabb | 2020-03-26 16:36:28 +0100 | [diff] [blame] | 1364 | .is_in_flight = true, |
Vladimir Sementsov-Ogievskiy | ead3f1b | 2021-07-03 00:16:34 +0300 | [diff] [blame] | 1365 | .co = qemu_coroutine_self(), |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1366 | }; |
| 1367 | qemu_co_queue_init(&op->waiting_requests); |
| 1368 | QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next); |
| 1369 | |
| 1370 | s->in_active_write_counter++; |
| 1371 | |
| 1372 | mirror_wait_on_conflicts(op, s, offset, bytes); |
| 1373 | |
| 1374 | bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); |
| 1375 | |
| 1376 | return op; |
| 1377 | } |
| 1378 | |
| 1379 | static void coroutine_fn active_write_settle(MirrorOp *op) |
| 1380 | { |
| 1381 | uint64_t start_chunk = op->offset / op->s->granularity; |
| 1382 | uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes, |
| 1383 | op->s->granularity); |
| 1384 | |
| 1385 | if (!--op->s->in_active_write_counter && op->s->actively_synced) { |
| 1386 | BdrvChild *source = op->s->mirror_top_bs->backing; |
| 1387 | |
| 1388 | if (QLIST_FIRST(&source->bs->parents) == source && |
| 1389 | QLIST_NEXT(source, next_parent) == NULL) |
| 1390 | { |
| 1391 | /* Assert that we are back in sync once all active write |
| 1392 | * operations are settled. |
| 1393 | * Note that we can only assert this if the mirror node |
| 1394 | * is the source node's only parent. */ |
| 1395 | assert(!bdrv_get_dirty_count(op->s->dirty_bitmap)); |
| 1396 | } |
| 1397 | } |
| 1398 | bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk); |
| 1399 | QTAILQ_REMOVE(&op->s->ops_in_flight, op, next); |
| 1400 | qemu_co_queue_restart_all(&op->waiting_requests); |
| 1401 | g_free(op); |
| 1402 | } |
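/*
 * A minimal sketch of the prepare/settle pairing; this is the shape used
 * by bdrv_mirror_top_do_write() below (here "job" stands for the
 * MirrorBlockJob, so the snippet is illustrative rather than verbatim):
 *
 *     op = active_write_prepare(job, offset, bytes);   // serialize range
 *     ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
 *     if (ret >= 0) {
 *         do_sync_target_write(job, MIRROR_METHOD_COPY, offset, bytes,
 *                              qiov, flags);           // mirror to target
 *     }
 *     active_write_settle(op);                         // unlock, wake waiters
 */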
| 1403 | |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1404 | static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs, |
Vladimir Sementsov-Ogievskiy | f7ef38d | 2021-09-03 13:27:59 +0300 | [diff] [blame] | 1405 | int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1406 | { |
| 1407 | return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags); |
| 1408 | } |
| 1409 | |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1410 | static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs, |
| 1411 | MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, |
| 1412 | int flags) |
| 1413 | { |
| 1414 | MirrorOp *op = NULL; |
| 1415 | MirrorBDSOpaque *s = bs->opaque; |
| 1416 | int ret = 0; |
| 1417 | bool copy_to_target; |
| 1418 | |
| 1419 | copy_to_target = s->job->ret >= 0 && |
| 1420 | s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; |
| 1421 | |
| 1422 | if (copy_to_target) { |
| 1423 | op = active_write_prepare(s->job, offset, bytes); |
| 1424 | } |
| 1425 | |
| 1426 | switch (method) { |
| 1427 | case MIRROR_METHOD_COPY: |
| 1428 | ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags); |
| 1429 | break; |
| 1430 | |
| 1431 | case MIRROR_METHOD_ZERO: |
| 1432 | ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags); |
| 1433 | break; |
| 1434 | |
| 1435 | case MIRROR_METHOD_DISCARD: |
Fam Zheng | 0b9fd3f | 2018-07-10 14:31:17 +0800 | [diff] [blame] | 1436 | ret = bdrv_co_pdiscard(bs->backing, offset, bytes); |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1437 | break; |
| 1438 | |
| 1439 | default: |
| 1440 | abort(); |
| 1441 | } |
| 1442 | |
| 1443 | if (ret < 0) { |
| 1444 | goto out; |
| 1445 | } |
| 1446 | |
| 1447 | if (copy_to_target) { |
| 1448 | do_sync_target_write(s->job, method, offset, bytes, qiov, flags); |
| 1449 | } |
| 1450 | |
| 1451 | out: |
| 1452 | if (copy_to_target) { |
| 1453 | active_write_settle(op); |
| 1454 | } |
| 1455 | return ret; |
| 1456 | } |
| 1457 | |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1458 | static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs, |
Vladimir Sementsov-Ogievskiy | e75abed | 2021-09-03 13:28:00 +0300 | [diff] [blame] | 1459 | int64_t offset, int64_t bytes, QEMUIOVector *qiov, BdrvRequestFlags flags) |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1460 | { |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1461 | MirrorBDSOpaque *s = bs->opaque; |
| 1462 | QEMUIOVector bounce_qiov; |
| 1463 | void *bounce_buf; |
| 1464 | int ret = 0; |
| 1465 | bool copy_to_target; |
| 1466 | |
| 1467 | copy_to_target = s->job->ret >= 0 && |
| 1468 | s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING; |
| 1469 | |
| 1470 | if (copy_to_target) { |
| 1471 | /* The guest might concurrently modify the data to write; but |
| 1472 | * the data on source and destination must match, so we have |
| 1473 | * to use a bounce buffer if we are going to write to the |
| 1474 | * target now. */ |
| 1475 | bounce_buf = qemu_blockalign(bs, bytes); |
| 1476 | iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes); |
| 1477 | |
| 1478 | qemu_iovec_init(&bounce_qiov, 1); |
| 1479 | qemu_iovec_add(&bounce_qiov, bounce_buf, bytes); |
| 1480 | qiov = &bounce_qiov; |
| 1481 | } |
| 1482 | |
| 1483 | ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov, |
| 1484 | flags); |
| 1485 | |
| 1486 | if (copy_to_target) { |
| 1487 | qemu_iovec_destroy(&bounce_qiov); |
| 1488 | qemu_vfree(bounce_buf); |
| 1489 | } |
| 1490 | |
| 1491 | return ret; |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1492 | } |
| 1493 | |
| 1494 | static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs) |
| 1495 | { |
Vladimir Sementsov-Ogievskiy | ce960aa | 2017-09-29 18:22:55 +0300 | [diff] [blame] | 1496 | if (bs->backing == NULL) { |
| 1497 | /* we can be here after failed bdrv_append in mirror_start_job */ |
| 1498 | return 0; |
| 1499 | } |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1500 | return bdrv_co_flush(bs->backing->bs); |
| 1501 | } |
| 1502 | |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1503 | static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs, |
Vladimir Sementsov-Ogievskiy | f34b2bc | 2021-09-03 13:28:03 +0300 | [diff] [blame^] | 1504 | int64_t offset, int64_t bytes, BdrvRequestFlags flags) |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1505 | { |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1506 | return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL, |
| 1507 | flags); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1508 | } |
| 1509 | |
| 1510 | static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs, |
Manos Pitsidianakis | f5a5ca7 | 2017-06-09 13:18:08 +0300 | [diff] [blame] | 1511 | int64_t offset, int bytes) |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1512 | { |
Max Reitz | d06107a | 2018-06-13 20:18:21 +0200 | [diff] [blame] | 1513 | return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes, |
| 1514 | NULL, 0); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1515 | } |
| 1516 | |
Max Reitz | 998b3a1 | 2019-02-01 20:29:28 +0100 | [diff] [blame] | 1517 | static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs) |
Kevin Wolf | fd4a649 | 2017-03-09 11:49:16 +0100 | [diff] [blame] | 1518 | { |
Vladimir Sementsov-Ogievskiy | 18775ff | 2017-09-28 15:03:00 +0300 | [diff] [blame] | 1519 | if (bs->backing == NULL) { |
| 1520 | /* we can be here after failed bdrv_attach_child in |
| 1521 | * bdrv_set_backing_hd */ |
| 1522 | return; |
| 1523 | } |
Kevin Wolf | fd4a649 | 2017-03-09 11:49:16 +0100 | [diff] [blame] | 1524 | pstrcpy(bs->exact_filename, sizeof(bs->exact_filename), |
| 1525 | bs->backing->bs->filename); |
| 1526 | } |
| 1527 | |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1528 | static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c, |
Max Reitz | bf8e925 | 2020-05-13 13:05:16 +0200 | [diff] [blame] | 1529 | BdrvChildRole role, |
Kevin Wolf | e0995dc | 2017-09-14 12:47:11 +0200 | [diff] [blame] | 1530 | BlockReopenQueue *reopen_queue, |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1531 | uint64_t perm, uint64_t shared, |
| 1532 | uint64_t *nperm, uint64_t *nshared) |
| 1533 | { |
Max Reitz | f94dc3b | 2019-05-22 19:03:47 +0200 | [diff] [blame] | 1534 | MirrorBDSOpaque *s = bs->opaque; |
| 1535 | |
| 1536 | if (s->stop) { |
| 1537 | /* |
| 1538 | * If the job is to be stopped, we do not need to forward |
| 1539 | * anything to the real image. |
| 1540 | */ |
| 1541 | *nperm = 0; |
| 1542 | *nshared = BLK_PERM_ALL; |
| 1543 | return; |
| 1544 | } |
| 1545 | |
Max Reitz | 53431b9 | 2021-02-11 18:22:41 +0100 | [diff] [blame] | 1546 | bdrv_default_perms(bs, c, role, reopen_queue, |
| 1547 | perm, shared, nperm, nshared); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1548 | |
Max Reitz | 53431b9 | 2021-02-11 18:22:41 +0100 | [diff] [blame] | 1549 | if (s->is_commit) { |
| 1550 | /* |
| 1551 | * For commit jobs, we cannot take CONSISTENT_READ, because |
| 1552 | * that permission is unshared for everything above the base |
| 1553 | * node (except for filters on the base node). |
| 1554 | * We also have to force-share the WRITE permission, or |
| 1555 | * otherwise we would block ourselves at the base node (if |
| 1556 | * writes are blocked for a node, they are also blocked for |
| 1557 | * its backing file). |
| 1558 | * (We could also share RESIZE, because it may be needed for |
| 1559 | * the target if its size is less than the top node's; but |
| 1560 | * bdrv_default_perms_for_cow() automatically shares RESIZE |
| 1561 | * for backing nodes if WRITE is shared, so there is no need |
| 1562 | * to do it here.) |
| 1563 | */ |
| 1564 | *nperm &= ~BLK_PERM_CONSISTENT_READ; |
| 1565 | *nshared |= BLK_PERM_WRITE; |
| 1566 | } |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1567 | } |
| 1568 | |
| 1569 | /* Dummy node that provides consistent read to its users without requiring it |
| 1570 | * from its backing file and that allows writes on the backing file chain. */ |
| 1571 | static BlockDriver bdrv_mirror_top = { |
| 1572 | .format_name = "mirror_top", |
| 1573 | .bdrv_co_preadv = bdrv_mirror_top_preadv, |
| 1574 | .bdrv_co_pwritev = bdrv_mirror_top_pwritev, |
| 1575 | .bdrv_co_pwrite_zeroes = bdrv_mirror_top_pwrite_zeroes, |
| 1576 | .bdrv_co_pdiscard = bdrv_mirror_top_pdiscard, |
| 1577 | .bdrv_co_flush = bdrv_mirror_top_flush, |
Kevin Wolf | fd4a649 | 2017-03-09 11:49:16 +0100 | [diff] [blame] | 1578 | .bdrv_refresh_filename = bdrv_mirror_top_refresh_filename, |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1579 | .bdrv_child_perm = bdrv_mirror_top_child_perm, |
Max Reitz | 6540fd1 | 2020-05-13 13:05:11 +0200 | [diff] [blame] | 1580 | |
| 1581 | .is_filter = true, |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1582 | }; |
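/*
 * The filter is instantiated with bdrv_new_open_driver() and spliced in
 * above the source node with bdrv_append(), exactly as mirror_start_job()
 * does below:
 *
 *     mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top,
 *                                          filter_node_name,
 *                                          BDRV_O_RDWR, errp);
 *     ret = bdrv_append(mirror_top_bs, bs, errp);
 */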
| 1583 | |
Vladimir Sementsov-Ogievskiy | cc19f17 | 2019-06-06 18:41:29 +0300 | [diff] [blame] | 1584 | static BlockJob *mirror_start_job( |
| 1585 | const char *job_id, BlockDriverState *bs, |
John Snow | 47970df | 2016-10-27 12:06:57 -0400 | [diff] [blame] | 1586 | int creation_flags, BlockDriverState *target, |
| 1587 | const char *replaces, int64_t speed, |
| 1588 | uint32_t granularity, int64_t buf_size, |
Max Reitz | 274fcce | 2016-06-10 20:57:47 +0200 | [diff] [blame] | 1589 | BlockMirrorBackingMode backing_mode, |
Max Reitz | cdf3bc9 | 2019-07-24 19:12:30 +0200 | [diff] [blame] | 1590 | bool zero_target, |
BenoƮt Canet | 09158f0 | 2014-06-27 18:25:25 +0200 | [diff] [blame] | 1591 | BlockdevOnError on_source_error, |
| 1592 | BlockdevOnError on_target_error, |
Fam Zheng | 0fc9f8e | 2015-06-08 13:56:08 +0800 | [diff] [blame] | 1593 | bool unmap, |
Markus Armbruster | 097310b | 2014-10-07 13:59:15 +0200 | [diff] [blame] | 1594 | BlockCompletionFunc *cb, |
Fam Zheng | 51ccfa2 | 2017-04-21 20:27:03 +0800 | [diff] [blame] | 1595 | void *opaque, |
BenoƮt Canet | 09158f0 | 2014-06-27 18:25:25 +0200 | [diff] [blame] | 1596 | const BlockJobDriver *driver, |
Wen Congyang | b49f7ea | 2016-07-27 15:01:47 +0800 | [diff] [blame] | 1597 | bool is_none_mode, BlockDriverState *base, |
Fam Zheng | 51ccfa2 | 2017-04-21 20:27:03 +0800 | [diff] [blame] | 1598 | bool auto_complete, const char *filter_node_name, |
Max Reitz | 481deba | 2018-06-13 20:18:22 +0200 | [diff] [blame] | 1599 | bool is_mirror, MirrorCopyMode copy_mode, |
Fam Zheng | 51ccfa2 | 2017-04-21 20:27:03 +0800 | [diff] [blame] | 1600 | Error **errp) |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1601 | { |
| 1602 | MirrorBlockJob *s; |
Max Reitz | 429076e | 2018-06-13 20:18:19 +0200 | [diff] [blame] | 1603 | MirrorBDSOpaque *bs_opaque; |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1604 | BlockDriverState *mirror_top_bs; |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1605 | bool target_is_backing; |
Max Reitz | 3f072a7 | 2019-06-12 16:27:32 +0200 | [diff] [blame] | 1606 | uint64_t target_perms, target_shared_perms; |
Kevin Wolf | d708642 | 2017-01-13 19:02:32 +0100 | [diff] [blame] | 1607 | int ret; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1608 | |
Paolo Bonzini | eee13df | 2013-01-21 17:09:46 +0100 | [diff] [blame] | 1609 | if (granularity == 0) { |
John Snow | 341ebc2 | 2015-04-17 19:49:52 -0400 | [diff] [blame] | 1610 | granularity = bdrv_get_default_bitmap_granularity(target); |
Paolo Bonzini | eee13df | 2013-01-21 17:09:46 +0100 | [diff] [blame] | 1611 | } |
| 1612 | |
Eric Blake | 3182664 | 2017-10-11 22:47:08 -0500 | [diff] [blame] | 1613 | assert(is_power_of_2(granularity)); |
Paolo Bonzini | eee13df | 2013-01-21 17:09:46 +0100 | [diff] [blame] | 1614 | |
Wen Congyang | 48ac0a4 | 2015-05-15 15:51:36 +0800 | [diff] [blame] | 1615 | if (buf_size < 0) { |
| 1616 | error_setg(errp, "Invalid parameter 'buf-size'"); |
Vladimir Sementsov-Ogievskiy | cc19f17 | 2019-06-06 18:41:29 +0300 | [diff] [blame] | 1617 | return NULL; |
Wen Congyang | 48ac0a4 | 2015-05-15 15:51:36 +0800 | [diff] [blame] | 1618 | } |
| 1619 | |
| 1620 | if (buf_size == 0) { |
| 1621 | buf_size = DEFAULT_MIRROR_BUF_SIZE; |
| 1622 | } |
Fam Zheng | 5bc361b | 2013-12-16 14:45:29 +0800 | [diff] [blame] | 1623 | |
Max Reitz | 3f072a7 | 2019-06-12 16:27:32 +0200 | [diff] [blame] | 1624 | if (bdrv_skip_filters(bs) == bdrv_skip_filters(target)) { |
Kevin Wolf | 86fae10 | 2018-08-14 11:52:25 +0200 | [diff] [blame] | 1625 | error_setg(errp, "Can't mirror node into itself"); |
Vladimir Sementsov-Ogievskiy | cc19f17 | 2019-06-06 18:41:29 +0300 | [diff] [blame] | 1626 | return NULL; |
Kevin Wolf | 86fae10 | 2018-08-14 11:52:25 +0200 | [diff] [blame] | 1627 | } |
| 1628 | |
Max Reitz | 53431b9 | 2021-02-11 18:22:41 +0100 | [diff] [blame] | 1629 | target_is_backing = bdrv_chain_contains(bs, target); |
| 1630 | |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1631 |     /* In the case of active commit, add a dummy driver to provide consistent |
| 1632 | * reads on the top, while disabling it in the intermediate nodes, and make |
| 1633 | * the backing chain writable. */ |
Kevin Wolf | 6cdbceb | 2017-02-20 18:10:05 +0100 | [diff] [blame] | 1634 | mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name, |
| 1635 | BDRV_O_RDWR, errp); |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1636 | if (mirror_top_bs == NULL) { |
Vladimir Sementsov-Ogievskiy | cc19f17 | 2019-06-06 18:41:29 +0300 | [diff] [blame] | 1637 | return NULL; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1638 | } |
Kevin Wolf | d3c8c67 | 2017-07-18 17:24:05 +0200 | [diff] [blame] | 1639 | if (!filter_node_name) { |
| 1640 | mirror_top_bs->implicit = true; |
| 1641 | } |
Max Reitz | e5182c1 | 2019-07-03 19:28:02 +0200 | [diff] [blame] | 1642 | |
| 1643 | /* So that we can always drop this node */ |
| 1644 | mirror_top_bs->never_freeze = true; |
| 1645 | |
Kevin Wolf | 4ef85a9 | 2017-01-25 19:16:34 +0100 | [diff] [blame] | 1646 | mirror_top_bs->total_sectors = bs->total_sectors; |
Max Reitz | 228345b | 2018-04-21 15:29:26 +0200 | [diff] [blame] | 1647 | mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED; |
Kevin Wolf | 80f5c33 | 2019-03-22 13:42:39 +0100 | [diff] [blame] | 1648 | mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED | |
| 1649 | BDRV_REQ_NO_FALLBACK; |
Max Reitz | 429076e | 2018-06-13 20:18:19 +0200 | [diff] [blame] | 1650 | bs_opaque = g_new0(MirrorBDSOpaque, 1); |
| 1651 | mirror_top_bs->opaque = bs_opaque; |
Paolo Bonzini | 893f7eb | 2012-10-18 16:49:23 +0200 | [diff] [blame] | 1652 | |
Max Reitz | 53431b9 | 2021-02-11 18:22:41 +0100 | [diff] [blame] | 1653 | bs_opaque->is_commit = target_is_backing; |
| 1654 | |
    bdrv_drained_begin(bs);
    ret = bdrv_append(mirror_top_bs, bs, errp);
    bdrv_drained_end(bs);

    if (ret < 0) {
        bdrv_unref(mirror_top_bs);
        return NULL;
    }

    /* Make sure that the source is not resized while the job is running */
    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
                         BLK_PERM_CONSISTENT_READ,
                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
                         creation_flags, cb, opaque, errp);
    if (!s) {
        goto fail;
    }
    bs_opaque->job = s;

    /* The block job now has a reference to this node */
    bdrv_unref(mirror_top_bs);

    s->mirror_top_bs = mirror_top_bs;

    /* No resize for the target either; while the mirror is still running, a
     * consistent read isn't necessarily possible. We could possibly allow
     * writes and graph modifications, though it would likely defeat the
     * purpose of a mirror, so leave them blocked for now.
     *
     * In the case of active commit, things look a bit different, though,
     * because the target is an already populated backing file in active use.
     * We can allow anything except resize there. */

    target_perms = BLK_PERM_WRITE;
    target_shared_perms = BLK_PERM_WRITE_UNCHANGED;

    if (target_is_backing) {
        int64_t bs_size, target_size;
        bs_size = bdrv_getlength(bs);
        if (bs_size < 0) {
            error_setg_errno(errp, -bs_size,
                             "Could not inquire top image size");
            goto fail;
        }

        target_size = bdrv_getlength(target);
        if (target_size < 0) {
            error_setg_errno(errp, -target_size,
                             "Could not inquire base image size");
            goto fail;
        }

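        /*
         * If the base image is smaller than the top image, completing the
         * commit may have to grow it, so we also need permission to resize
         * the target in that case.
         */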
        if (target_size < bs_size) {
            target_perms |= BLK_PERM_RESIZE;
        }

        target_shared_perms |= BLK_PERM_CONSISTENT_READ
                            | BLK_PERM_WRITE
                            | BLK_PERM_GRAPH_MOD;
    } else if (bdrv_chain_contains(bs, bdrv_skip_filters(target))) {
        /*
         * We may want to allow this in the future, but it would
         * require taking some extra care.
         */
        error_setg(errp, "Cannot mirror to a filter on top of a node in the "
                   "source's backing chain");
        goto fail;
    }

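    /*
     * All completion modes other than leaving the backing chain alone will
     * change the graph around the target at job completion, which requires
     * graph-modification permission on it.
     */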
    if (backing_mode != MIRROR_LEAVE_BACKING_CHAIN) {
        target_perms |= BLK_PERM_GRAPH_MOD;
    }

    s->target = blk_new(s->common.job.aio_context,
                        target_perms, target_shared_perms);
    ret = blk_insert_bs(s->target, target, errp);
    if (ret < 0) {
        goto fail;
    }
    if (is_mirror) {
        /* XXX: The mirror target could be an NBD server of the target QEMU in
         * the case of non-shared block migration. To allow migration
         * completion, we have to allow "inactivate" of the target BB. When
         * that happens, we know the job is drained, and the vcpus are
         * stopped, so no write operation will be performed. The block layer
         * already has assertions to ensure that. */
        blk_set_force_allow_inactivate(s->target);
    }
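    /*
     * The job may still issue requests to the target while the target is
     * drained; disabling request queuing keeps those requests from being
     * queued up and deadlocking the drained section.
     */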
    blk_set_allow_aio_context_change(s->target, true);
    blk_set_disable_request_queuing(s->target, true);

    s->replaces = g_strdup(replaces);
    s->on_source_error = on_source_error;
    s->on_target_error = on_target_error;
    s->is_none_mode = is_none_mode;
    s->backing_mode = backing_mode;
    s->zero_target = zero_target;
    s->copy_mode = copy_mode;
    s->base = base;
    s->base_overlay = bdrv_find_overlay(bs, base);
    s->granularity = granularity;
    s->buf_size = ROUND_UP(buf_size, granularity);
    s->unmap = unmap;
    if (auto_complete) {
        s->should_complete = true;
    }

    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
    if (!s->dirty_bitmap) {
        goto fail;
    }
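    /*
     * In write-blocking mode, guest writes are mirrored to the target
     * synchronously, so they do not need to be tracked in the dirty bitmap;
     * only the initial image contents remain to be copied in the background.
     */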
    if (s->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bdrv_disable_dirty_bitmap(s->dirty_bitmap);
    }

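    /*
     * Register the source with the job without taking any permissions on it;
     * the job accesses it through mirror_top_bs. The shared permissions keep
     * other users of the node, such as the guest device, unblocked.
     */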
    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
                             BLK_PERM_CONSISTENT_READ,
                             errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with blk_new() */
    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    /* In commit_active_start() all intermediate nodes disappear, so
     * any jobs in them must be blocked */
    if (target_is_backing) {
        BlockDriverState *iter, *filtered_target;
        uint64_t iter_shared_perms;

        /*
         * The topmost node with
         * bdrv_skip_filters(filtered_target) == bdrv_skip_filters(target)
         */
        filtered_target = bdrv_cow_bs(bdrv_find_overlay(bs, target));

        assert(bdrv_skip_filters(filtered_target) ==
               bdrv_skip_filters(target));

        /*
         * XXX BLK_PERM_WRITE needs to be allowed so we don't block
         * ourselves at s->base (if writes are blocked for a node, they are
         * also blocked for its backing file). The other option would be a
         * second filter driver above s->base (== target).
         */
        iter_shared_perms = BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE;

        for (iter = bdrv_filter_or_cow_bs(bs); iter != target;
             iter = bdrv_filter_or_cow_bs(iter))
        {
            if (iter == filtered_target) {
                /*
                 * From here on, all nodes are filters on the base.
                 * This allows us to share BLK_PERM_CONSISTENT_READ.
                 */
                iter_shared_perms |= BLK_PERM_CONSISTENT_READ;
            }

            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                     iter_shared_perms, errp);
            if (ret < 0) {
                goto fail;
            }
        }

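        /*
         * Freeze the backing chain between the filter and the target so that
         * the links the job relies on cannot be changed or removed while it
         * is running.
         */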
        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
            goto fail;
        }
    }

    QTAILQ_INIT(&s->ops_in_flight);

    trace_mirror_start(bs, s, opaque);
    job_start(&s->common.job);

    return &s->common;

fail:
    if (s) {
        /* Make sure this BDS does not go away until we have completed the
         * graph changes below */
        bdrv_ref(mirror_top_bs);

        g_free(s->replaces);
        blk_unref(s->target);
        bs_opaque->job = NULL;
        if (s->dirty_bitmap) {
            bdrv_release_dirty_bitmap(s->dirty_bitmap);
        }
        job_early_fail(&s->common.job);
    }

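    /*
     * Tell the dummy driver to stop requesting permissions on its child so
     * that mirror_top_bs can be replaced by that child and then dropped.
     */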
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    bdrv_replace_node(mirror_top_bs, mirror_top_bs->backing->bs, &error_abort);

    bdrv_unref(mirror_top_bs);

    return NULL;
}

void mirror_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, const char *replaces,
                  int creation_flags, int64_t speed,
                  uint32_t granularity, int64_t buf_size,
                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
                  bool zero_target,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  bool unmap, const char *filter_node_name,
                  MirrorCopyMode copy_mode, Error **errp)
{
    bool is_none_mode;
    BlockDriverState *base;

    if ((mode == MIRROR_SYNC_MODE_INCREMENTAL) ||
        (mode == MIRROR_SYNC_MODE_BITMAP)) {
        error_setg(errp, "Sync mode '%s' not supported",
                   MirrorSyncMode_str(mode));
        return;
    }
    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
    base = mode == MIRROR_SYNC_MODE_TOP ? bdrv_backing_chain_next(bs) : NULL;
    mirror_start_job(job_id, bs, creation_flags, target, replaces,
                     speed, granularity, buf_size, backing_mode, zero_target,
                     on_source_error, on_target_error, unmap, NULL, NULL,
                     &mirror_job_driver, is_none_mode, base, false,
                     filter_node_name, true, copy_mode, errp);
}

BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
                              BlockDriverState *base, int creation_flags,
                              int64_t speed, BlockdevOnError on_error,
                              const char *filter_node_name,
                              BlockCompletionFunc *cb, void *opaque,
                              bool auto_complete, Error **errp)
{
    bool base_read_only;
    BlockJob *job;

    base_read_only = bdrv_is_read_only(base);

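    /* Committing writes into the base image, so if it is currently read-only,
     * reopen it read-write for the duration of the job; the flag is restored
     * below if starting the job fails. */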
    if (base_read_only) {
        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
            return NULL;
        }
    }

    job = mirror_start_job(
                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
                     MIRROR_LEAVE_BACKING_CHAIN, false,
                     on_error, on_error, true, cb, opaque,
                     &commit_active_job_driver, false, base, auto_complete,
                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
                     errp);
    if (!job) {
        goto error_restore_flags;
    }

    return job;

error_restore_flags:
    /* Ignore errors from bdrv_reopen_set_read_only() here (errp is NULL),
     * because we want to propagate the original error */
    if (base_read_only) {
        bdrv_reopen_set_read_only(base, true, NULL);
    }
    return NULL;
}