/*
 * block_copy API
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *  Vladimir Sementsov-Ogievskiy <vsementsov@virtuozzo.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "qapi/error.h"
#include "block/block-copy.h"
#include "block/block_int-io.h"
#include "block/dirty-bitmap.h"
#include "block/reqlist.h"
#include "sysemu/block-backend.h"
#include "qemu/units.h"
#include "qemu/co-shared-resource.h"
#include "qemu/coroutine.h"
#include "qemu/ratelimit.h"
#include "block/aio_task.h"
#include "qemu/error-report.h"
#include "qemu/memalign.h"

#define BLOCK_COPY_MAX_COPY_RANGE (16 * MiB)
#define BLOCK_COPY_MAX_BUFFER (1 * MiB)
#define BLOCK_COPY_MAX_MEM (128 * MiB)
#define BLOCK_COPY_MAX_WORKERS 64
#define BLOCK_COPY_SLICE_TIME 100000000ULL /* ns */
#define BLOCK_COPY_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef enum {
    COPY_READ_WRITE_CLUSTER,
    COPY_READ_WRITE,
    COPY_WRITE_ZEROES,
    COPY_RANGE_SMALL,
    COPY_RANGE_FULL
} BlockCopyMethod;
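
/*
 * Note on the method state machine (derived from block_copy_set_copy_opts()
 * and block_copy_do_copy() below): when copy_range is enabled we start in
 * COPY_RANGE_SMALL; the first successful copy_range upgrades the shared
 * state to COPY_RANGE_FULL (larger chunks), while a failed copy_range
 * downgrades it to COPY_READ_WRITE.  COPY_READ_WRITE_CLUSTER is chosen when
 * compression is enabled or max_transfer is smaller than the cluster size.
 * COPY_WRITE_ZEROES is selected per-task for zeroed regions and never
 * becomes the shared state.
 */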

static coroutine_fn int block_copy_task_entry(AioTask *task);

typedef struct BlockCopyCallState {
    /* Fields initialized in block_copy_async() and never changed. */
    BlockCopyState *s;
    int64_t offset;
    int64_t bytes;
    int max_workers;
    int64_t max_chunk;
    bool ignore_ratelimit;
    BlockCopyAsyncCallbackFunc cb;
    void *cb_opaque;
    /* Coroutine where async block-copy is running */
    Coroutine *co;

    /* Fields whose state changes throughout the execution */
    bool finished; /* atomic */
    QemuCoSleep sleep; /* TODO: protect API with a lock */
    bool cancelled; /* atomic */
    /* To reference all call states from BlockCopyState */
    QLIST_ENTRY(BlockCopyCallState) list;

    /*
     * Fields that report information about return values and errors.
     * Protected by lock in BlockCopyState.
     */
    bool error_is_read;
    /*
     * @ret is set concurrently by tasks under mutex. It is only set once, by
     * the first failed task (and left untouched if no task failed).
     * After finishing (call_state->finished is true), it is not modified
     * anymore and may be safely read without the mutex.
     */
    int ret;
} BlockCopyCallState;

typedef struct BlockCopyTask {
    AioTask task;

    /*
     * Fields initialized in block_copy_task_create()
     * and never changed.
     */
    BlockCopyState *s;
    BlockCopyCallState *call_state;
    /*
     * @method can also be set again in the while loop of
     * block_copy_dirty_clusters(), but it is never accessed concurrently
     * because the only other function that reads it is
     * block_copy_task_entry() and it is invoked afterwards in the same
     * iteration.
     */
    BlockCopyMethod method;

    /*
     * Generally, req is protected by the lock in BlockCopyState. Still,
     * req.offset is only set on task creation, so it may be read
     * concurrently after creation. req.bytes is changed at most once, and
     * we need only protect against a parallel read while updating @bytes in
     * block_copy_task_shrink().
     */
    BlockReq req;
} BlockCopyTask;

static int64_t task_end(BlockCopyTask *task)
{
    return task->req.offset + task->req.bytes;
}

typedef struct BlockCopyState {
    /*
     * BdrvChild objects are not owned or managed by block-copy. They are
     * provided by the block-copy user, and the user is responsible for
     * appropriate permissions on these children.
     */
    BdrvChild *source;
    BdrvChild *target;

    /*
     * Fields initialized in block_copy_state_new()
     * and never changed.
     */
    int64_t cluster_size;
    int64_t max_transfer;
    uint64_t len;
    BdrvRequestFlags write_flags;

    /*
     * Fields whose state changes throughout the execution.
     * Protected by lock.
     */
    CoMutex lock;
    int64_t in_flight_bytes;
    BlockCopyMethod method;
    BlockReqList reqs;
    QLIST_HEAD(, BlockCopyCallState) calls;
    /*
     * skip_unallocated:
     *
     * Used by sync=top jobs, which first scan the source node for unallocated
     * areas and clear them in the copy_bitmap. During this process, the bitmap
     * is thus not fully initialized: it may still have bits set for areas that
     * are unallocated and should actually not be copied.
     *
     * This is indicated by skip_unallocated.
     *
     * In this case, block_copy() will query the source's allocation status,
     * skip unallocated regions, clear them in the copy_bitmap, and invoke
     * block_copy_reset_unallocated() every time it does.
     */
    bool skip_unallocated; /* atomic */
    /* State fields that use a thread-safe API */
    BdrvDirtyBitmap *copy_bitmap;
    ProgressMeter *progress;
    SharedResource *mem;
    RateLimit rate_limit;
} BlockCopyState;

/* Called with lock held */
static int64_t block_copy_chunk_size(BlockCopyState *s)
{
    switch (s->method) {
    case COPY_READ_WRITE_CLUSTER:
        return s->cluster_size;
    case COPY_READ_WRITE:
    case COPY_RANGE_SMALL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_BUFFER),
                   s->max_transfer);
    case COPY_RANGE_FULL:
        return MIN(MAX(s->cluster_size, BLOCK_COPY_MAX_COPY_RANGE),
                   s->max_transfer);
    default:
        /* Cannot have COPY_WRITE_ZEROES here. */
        abort();
    }
}
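
/*
 * A worked example of the chunk-size arithmetic above, assuming the default
 * cluster size (64 KiB) and an unconstrained max_transfer: buffered copying
 * (COPY_READ_WRITE / COPY_RANGE_SMALL) yields MIN(MAX(64 KiB, 1 MiB),
 * max_transfer) = 1 MiB per chunk, while COPY_RANGE_FULL yields
 * MIN(MAX(64 KiB, 16 MiB), max_transfer) = 16 MiB.  A device reporting a
 * smaller max_transfer caps both results.
 */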

/*
 * Search for the first dirty area in the offset/bytes range and create a
 * task at the beginning of it.
 */
static coroutine_fn BlockCopyTask *
block_copy_task_create(BlockCopyState *s, BlockCopyCallState *call_state,
                       int64_t offset, int64_t bytes)
{
    BlockCopyTask *task;
    int64_t max_chunk;

    QEMU_LOCK_GUARD(&s->lock);
    max_chunk = MIN_NON_ZERO(block_copy_chunk_size(s), call_state->max_chunk);
    if (!bdrv_dirty_bitmap_next_dirty_area(s->copy_bitmap,
                                           offset, offset + bytes,
                                           max_chunk, &offset, &bytes))
    {
        return NULL;
    }

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    bytes = QEMU_ALIGN_UP(bytes, s->cluster_size);

    /* The region is dirty, so no existing tasks are possible in it. */
    assert(!reqlist_find_conflict(&s->reqs, offset, bytes));

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    s->in_flight_bytes += bytes;

    task = g_new(BlockCopyTask, 1);
    *task = (BlockCopyTask) {
        .task.func = block_copy_task_entry,
        .s = s,
        .call_state = call_state,
        .method = s->method,
    };
    reqlist_init_req(&s->reqs, &task->req, offset, bytes);

    return task;
}

/*
 * block_copy_task_shrink
 *
 * Drop the tail of the task to be handled later. Set dirty bits back and
 * wake up all tasks waiting for us (maybe some of them do not intersect with
 * the shrunk task).
 */
static void coroutine_fn block_copy_task_shrink(BlockCopyTask *task,
                                                int64_t new_bytes)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    if (new_bytes == task->req.bytes) {
        return;
    }

    assert(new_bytes > 0 && new_bytes < task->req.bytes);

    task->s->in_flight_bytes -= task->req.bytes - new_bytes;
    bdrv_set_dirty_bitmap(task->s->copy_bitmap,
                          task->req.offset + new_bytes,
                          task->req.bytes - new_bytes);

    reqlist_shrink_req(&task->req, new_bytes);
}

static void coroutine_fn block_copy_task_end(BlockCopyTask *task, int ret)
{
    QEMU_LOCK_GUARD(&task->s->lock);
    task->s->in_flight_bytes -= task->req.bytes;
    if (ret < 0) {
        bdrv_set_dirty_bitmap(task->s->copy_bitmap, task->req.offset,
                              task->req.bytes);
    }
    if (task->s->progress) {
        progress_set_remaining(task->s->progress,
                               bdrv_get_dirty_count(task->s->copy_bitmap) +
                               task->s->in_flight_bytes);
    }
    reqlist_remove_req(&task->req);
}

void block_copy_state_free(BlockCopyState *s)
{
    if (!s) {
        return;
    }

    ratelimit_destroy(&s->rate_limit);
    bdrv_release_dirty_bitmap(s->copy_bitmap);
    shres_destroy(s->mem);
    g_free(s);
}

static uint32_t block_copy_max_transfer(BdrvChild *source, BdrvChild *target)
{
    return MIN_NON_ZERO(INT_MAX,
                        MIN_NON_ZERO(source->bs->bl.max_transfer,
                                     target->bs->bl.max_transfer));
}
283
Vladimir Sementsov-Ogievskiyf8b95042021-08-24 11:38:29 +0300284void block_copy_set_copy_opts(BlockCopyState *s, bool use_copy_range,
285 bool compress)
286{
287 /* Keep BDRV_REQ_SERIALISING set (or not set) in block_copy_state_new() */
288 s->write_flags = (s->write_flags & BDRV_REQ_SERIALISING) |
289 (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);
290
291 if (s->max_transfer < s->cluster_size) {
292 /*
293 * copy_range does not respect max_transfer. We don't want to bother
294 * with requests smaller than block-copy cluster size, so fallback to
295 * buffered copying (read and write respect max_transfer on their
296 * behalf).
297 */
298 s->method = COPY_READ_WRITE_CLUSTER;
299 } else if (compress) {
300 /* Compression supports only cluster-size writes and no copy-range. */
301 s->method = COPY_READ_WRITE_CLUSTER;
302 } else {
303 /*
304 * If copy range enabled, start with COPY_RANGE_SMALL, until first
305 * successful copy_range (look at block_copy_do_copy).
306 */
307 s->method = use_copy_range ? COPY_RANGE_SMALL : COPY_READ_WRITE;
308 }
309}
310
Vladimir Sementsov-Ogievskiyb518e9e2021-08-24 11:38:31 +0300311static int64_t block_copy_calculate_cluster_size(BlockDriverState *target,
312 Error **errp)
313{
314 int ret;
315 BlockDriverInfo bdi;
316 bool target_does_cow = bdrv_backing_chain_next(target);
317
318 /*
319 * If there is no backing file on the target, we cannot rely on COW if our
320 * backup cluster size is smaller than the target cluster size. Even for
321 * targets with a backing file, try to avoid COW if possible.
322 */
323 ret = bdrv_get_info(target, &bdi);
324 if (ret == -ENOTSUP && !target_does_cow) {
325 /* Cluster size is not defined */
326 warn_report("The target block device doesn't provide "
327 "information about the block size and it doesn't have a "
328 "backing file. The default block size of %u bytes is "
329 "used. If the actual block size of the target exceeds "
330 "this default, the backup may be unusable",
331 BLOCK_COPY_CLUSTER_SIZE_DEFAULT);
332 return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
333 } else if (ret < 0 && !target_does_cow) {
334 error_setg_errno(errp, -ret,
335 "Couldn't determine the cluster size of the target image, "
336 "which has no backing file");
337 error_append_hint(errp,
338 "Aborting, since this may create an unusable destination image\n");
339 return ret;
340 } else if (ret < 0 && target_does_cow) {
341 /* Not fatal; just trudge on ahead. */
342 return BLOCK_COPY_CLUSTER_SIZE_DEFAULT;
343 }
344
345 return MAX(BLOCK_COPY_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
346}
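
/*
 * For example (assuming the default BLOCK_COPY_CLUSTER_SIZE_DEFAULT of
 * 64 KiB): a target qcow2 image reporting a 128 KiB cluster size yields
 * MAX(64 KiB, 128 KiB) = 128 KiB, so block-copy clusters never cover partial
 * target clusters; a target that reports no usable cluster size ends up with
 * the 64 KiB default, as handled above.
 */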

BlockCopyState *block_copy_state_new(BdrvChild *source, BdrvChild *target,
                                     const BdrvDirtyBitmap *bitmap,
                                     Error **errp)
{
    ERRP_GUARD();
    BlockCopyState *s;
    int64_t cluster_size;
    BdrvDirtyBitmap *copy_bitmap;
    bool is_fleecing;

    cluster_size = block_copy_calculate_cluster_size(target->bs, errp);
    if (cluster_size < 0) {
        return NULL;
    }

    copy_bitmap = bdrv_create_dirty_bitmap(source->bs, cluster_size, NULL,
                                           errp);
    if (!copy_bitmap) {
        return NULL;
    }
    bdrv_disable_dirty_bitmap(copy_bitmap);
    if (bitmap) {
        if (!bdrv_merge_dirty_bitmap(copy_bitmap, bitmap, NULL, errp)) {
            error_prepend(errp, "Failed to merge bitmap '%s' to internal "
                          "copy-bitmap: ", bdrv_dirty_bitmap_name(bitmap));
            bdrv_release_dirty_bitmap(copy_bitmap);
            return NULL;
        }
    } else {
        bdrv_set_dirty_bitmap(copy_bitmap, 0,
                              bdrv_dirty_bitmap_size(copy_bitmap));
    }

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it should
     * represent a kind of snapshot of the source at backup-start time, and
     * the target is going to be read by somebody (for example, used as an
     * NBD export) during the backup job.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we may occasionally
     * read data already updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    is_fleecing = bdrv_chain_contains(target->bs, source->bs);

    s = g_new(BlockCopyState, 1);
    *s = (BlockCopyState) {
        .source = source,
        .target = target,
        .copy_bitmap = copy_bitmap,
        .cluster_size = cluster_size,
        .len = bdrv_dirty_bitmap_size(copy_bitmap),
        .write_flags = (is_fleecing ? BDRV_REQ_SERIALISING : 0),
        .mem = shres_create(BLOCK_COPY_MAX_MEM),
        .max_transfer = QEMU_ALIGN_DOWN(
                                    block_copy_max_transfer(source, target),
                                    cluster_size),
    };

    block_copy_set_copy_opts(s, false, false);

    ratelimit_init(&s->rate_limit);
    qemu_co_mutex_init(&s->lock);
    QLIST_INIT(&s->reqs);
    QLIST_INIT(&s->calls);

    return s;
}
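
/*
 * A minimal usage sketch (hypothetical caller code; the BdrvChild setup,
 * coroutine context, and the job's ProgressMeter are assumed to be handled
 * by the block job, as in block/backup.c; timeout_ns of 0 is assumed to mean
 * no timeout):
 *
 *     BlockCopyState *bcs = block_copy_state_new(source, target, NULL, errp);
 *     if (!bcs) {
 *         return;
 *     }
 *     block_copy_set_progress_meter(bcs, &job->progress);
 *     // in coroutine context: copy the first cluster, honouring ratelimit
 *     int ret = block_copy(bcs, 0, block_copy_cluster_size(bcs),
 *                          false, 0, NULL, NULL);
 *     block_copy_state_free(bcs);
 */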

/* Only set before running the job, no need for locking. */
void block_copy_set_progress_meter(BlockCopyState *s, ProgressMeter *pm)
{
    s->progress = pm;
}

/*
 * Takes ownership of @task
 *
 * If pool is NULL, directly run the task; otherwise schedule it into the pool.
 *
 * Returns: task.func return code if pool is NULL
 *          otherwise -ECANCELED if pool status is bad
 *          otherwise 0 (successfully scheduled)
 */
static coroutine_fn int block_copy_task_run(AioTaskPool *pool,
                                            BlockCopyTask *task)
{
    if (!pool) {
        int ret = task->task.func(&task->task);

        g_free(task);
        return ret;
    }

    aio_task_pool_wait_slot(pool);
    if (aio_task_pool_status(pool) < 0) {
        co_put_to_shres(task->s->mem, task->req.bytes);
        block_copy_task_end(task, -ECANCELED);
        g_free(task);
        return -ECANCELED;
    }

    aio_task_pool_start_task(pool, &task->task);

    return 0;
}

/*
 * block_copy_do_copy
 *
 * Do a copy of a cluster-aligned chunk. The requested region is allowed to
 * exceed s->len only to cover the last cluster when s->len is not aligned to
 * clusters.
 *
 * No synchronization here: neither bitmap nor intersecting-request handling,
 * only the copy itself.
 *
 * @method is an in-out argument, so that copy_range can be either extended to
 * a full-size buffer or disabled if the copy_range attempt fails. The output
 * value of @method should be used for subsequent tasks.
 * Returns 0 on success.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_do_copy(BlockCopyState *s, int64_t offset, int64_t bytes,
                   BlockCopyMethod *method, bool *error_is_read)
{
    int ret;
    int64_t nbytes = MIN(offset + bytes, s->len) - offset;
    void *bounce_buffer = NULL;

    assert(offset >= 0 && bytes > 0 && INT64_MAX - offset >= bytes);
    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));
    assert(offset < s->len);
    assert(offset + bytes <= s->len ||
           offset + bytes == QEMU_ALIGN_UP(s->len, s->cluster_size));
    assert(nbytes < INT_MAX);

    switch (*method) {
    case COPY_WRITE_ZEROES:
        ret = bdrv_co_pwrite_zeroes(s->target, offset, nbytes, s->write_flags &
                                    ~BDRV_REQ_WRITE_COMPRESSED);
        if (ret < 0) {
            trace_block_copy_write_zeroes_fail(s, offset, ret);
            *error_is_read = false;
        }
        return ret;

    case COPY_RANGE_SMALL:
    case COPY_RANGE_FULL:
        ret = bdrv_co_copy_range(s->source, offset, s->target, offset, nbytes,
                                 0, s->write_flags);
        if (ret >= 0) {
            /* Successful copy-range, increase chunk size. */
            *method = COPY_RANGE_FULL;
            return 0;
        }

        trace_block_copy_copy_range_fail(s, offset, ret);
        *method = COPY_READ_WRITE;
        /* Fall through to read+write with allocated buffer */

    case COPY_READ_WRITE_CLUSTER:
    case COPY_READ_WRITE:
        /*
         * In case of a failed copy_range request above, we may proceed with
         * a buffered request larger than BLOCK_COPY_MAX_BUFFER.
         * Still, further requests will be properly limited, so don't care too
         * much. Moreover, the most likely case (copy_range is unsupported for
         * the configuration, so the very first copy_range request fails)
         * is handled by setting the large copy_size only after the first
         * successful copy_range.
         */

        bounce_buffer = qemu_blockalign(s->source->bs, nbytes);

        ret = bdrv_co_pread(s->source, offset, nbytes, bounce_buffer, 0);
        if (ret < 0) {
            trace_block_copy_read_fail(s, offset, ret);
            *error_is_read = true;
            goto out;
        }

        ret = bdrv_co_pwrite(s->target, offset, nbytes, bounce_buffer,
                             s->write_flags);
        if (ret < 0) {
            trace_block_copy_write_fail(s, offset, ret);
            *error_is_read = false;
            goto out;
        }

    out:
        qemu_vfree(bounce_buffer);
        break;

    default:
        abort();
    }

    return ret;
}

static coroutine_fn int block_copy_task_entry(AioTask *task)
{
    BlockCopyTask *t = container_of(task, BlockCopyTask, task);
    BlockCopyState *s = t->s;
    bool error_is_read = false;
    BlockCopyMethod method = t->method;
    int ret;

    WITH_GRAPH_RDLOCK_GUARD() {
        ret = block_copy_do_copy(s, t->req.offset, t->req.bytes, &method,
                                 &error_is_read);
    }

    WITH_QEMU_LOCK_GUARD(&s->lock) {
        if (s->method == t->method) {
            s->method = method;
        }

        if (ret < 0) {
            if (!t->call_state->ret) {
                t->call_state->ret = ret;
                t->call_state->error_is_read = error_is_read;
            }
        } else if (s->progress) {
            progress_work_done(s->progress, t->req.bytes);
        }
    }
    co_put_to_shres(s->mem, t->req.bytes);
    block_copy_task_end(t, ret);

    return ret;
}

static coroutine_fn GRAPH_RDLOCK
int block_copy_block_status(BlockCopyState *s, int64_t offset, int64_t bytes,
                            int64_t *pnum)
{
    int64_t num;
    BlockDriverState *base;
    int ret;

    if (qatomic_read(&s->skip_unallocated)) {
        base = bdrv_backing_chain_next(s->source->bs);
    } else {
        base = NULL;
    }

    ret = bdrv_co_block_status_above(s->source->bs, base, offset, bytes, &num,
                                     NULL, NULL);
    if (ret < 0 || num < s->cluster_size) {
        /*
         * On error, or if we failed to obtain a large enough chunk, just
         * fall back to copying one cluster.
         */
        num = s->cluster_size;
        ret = BDRV_BLOCK_ALLOCATED | BDRV_BLOCK_DATA;
    } else if (offset + num == s->len) {
        num = QEMU_ALIGN_UP(num, s->cluster_size);
    } else {
        num = QEMU_ALIGN_DOWN(num, s->cluster_size);
    }

    *pnum = num;
    return ret;
}
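
/*
 * To illustrate the clamping above (the numbers are assumptions for the
 * example): with cluster_size = 64 KiB, a status query returning
 * num = 200 KiB in the middle of the image is aligned down to 192 KiB so the
 * tail stays cluster-aligned, while the same 200 KiB at the very end of the
 * image (offset + num == s->len) is instead aligned up to 256 KiB to cover
 * the final partial cluster.
 */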

/*
 * Check if the cluster starting at offset is allocated or not.
 * Return via @pnum the number of contiguous clusters sharing this allocation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_is_cluster_allocated(BlockCopyState *s, int64_t offset,
                                int64_t *pnum)
{
    BlockDriverState *bs = s->source->bs;
    int64_t count, total_count = 0;
    int64_t bytes = s->len - offset;
    int ret;

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));

    while (true) {
        /* protected in backup_run() */
        ret = bdrv_co_is_allocated(bs, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        total_count += count;

        if (ret || count == 0) {
            /*
             * ret: partial segment(s) are considered allocated.
             * otherwise: unallocated tail is treated as an entire segment.
             */
            *pnum = DIV_ROUND_UP(total_count, s->cluster_size);
            return ret;
        }

        /* Unallocated segment(s) with uncertain following segment(s) */
        if (total_count >= s->cluster_size) {
            *pnum = total_count / s->cluster_size;
            return 0;
        }

        offset += count;
        bytes -= count;
    }
}

void block_copy_reset(BlockCopyState *s, int64_t offset, int64_t bytes)
{
    QEMU_LOCK_GUARD(&s->lock);

    bdrv_reset_dirty_bitmap(s->copy_bitmap, offset, bytes);
    if (s->progress) {
        progress_set_remaining(s->progress,
                               bdrv_get_dirty_count(s->copy_bitmap) +
                               s->in_flight_bytes);
    }
}

/*
 * Reset bits in copy_bitmap starting at offset if they represent unallocated
 * data in the image. May reset subsequent contiguous bits.
 * @return 0 when the cluster at @offset was unallocated,
 *         1 otherwise, and a negative errno on error.
 */
int64_t coroutine_fn block_copy_reset_unallocated(BlockCopyState *s,
                                                  int64_t offset,
                                                  int64_t *count)
{
    int ret;
    int64_t clusters, bytes;

    ret = block_copy_is_cluster_allocated(s, offset, &clusters);
    if (ret < 0) {
        return ret;
    }

    bytes = clusters * s->cluster_size;

    if (!ret) {
        block_copy_reset(s, offset, bytes);
    }

    *count = bytes;
    return ret;
}
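
/*
 * A sketch of the sync=top initialization scan this function supports
 * (hypothetical caller code; the real loop lives in the backup job): walk
 * the disk once, clearing copy_bitmap bits for unallocated regions before
 * any copying starts.
 *
 *     int64_t offset = 0, count;
 *     while (offset < len) {
 *         int64_t ret = block_copy_reset_unallocated(s, offset, &count);
 *         if (ret < 0) {
 *             break;           // error; handle or propagate
 *         }
 *         offset += count;     // skip the region just classified
 *     }
 */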

/*
 * block_copy_dirty_clusters
 *
 * Copy dirty clusters in the @offset/@bytes range.
 * Returns 1 if dirty clusters were found and successfully copied, 0 if no
 * dirty clusters were found, and -errno on failure.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_dirty_clusters(BlockCopyCallState *call_state)
{
    BlockCopyState *s = call_state->s;
    int64_t offset = call_state->offset;
    int64_t bytes = call_state->bytes;

    int ret = 0;
    bool found_dirty = false;
    int64_t end = offset + bytes;
    AioTaskPool *aio = NULL;

    /*
     * The block_copy() user is responsible for keeping source and target in
     * the same AioContext.
     */
    assert(bdrv_get_aio_context(s->source->bs) ==
           bdrv_get_aio_context(s->target->bs));

    assert(QEMU_IS_ALIGNED(offset, s->cluster_size));
    assert(QEMU_IS_ALIGNED(bytes, s->cluster_size));

    while (bytes && aio_task_pool_status(aio) == 0 &&
           !qatomic_read(&call_state->cancelled)) {
        BlockCopyTask *task;
        int64_t status_bytes;

        task = block_copy_task_create(s, call_state, offset, bytes);
        if (!task) {
            /* No more dirty bits in the bitmap */
            trace_block_copy_skip_range(s, offset, bytes);
            break;
        }
        if (task->req.offset > offset) {
            trace_block_copy_skip_range(s, offset, task->req.offset - offset);
        }

        found_dirty = true;

        ret = block_copy_block_status(s, task->req.offset, task->req.bytes,
                                      &status_bytes);
        assert(ret >= 0); /* never fail */
        if (status_bytes < task->req.bytes) {
            block_copy_task_shrink(task, status_bytes);
        }
        if (qatomic_read(&s->skip_unallocated) &&
            !(ret & BDRV_BLOCK_ALLOCATED)) {
            block_copy_task_end(task, 0);
            trace_block_copy_skip_range(s, task->req.offset, task->req.bytes);
            offset = task_end(task);
            bytes = end - offset;
            g_free(task);
            continue;
        }
        if (ret & BDRV_BLOCK_ZERO) {
            task->method = COPY_WRITE_ZEROES;
        }

        if (!call_state->ignore_ratelimit) {
            uint64_t ns = ratelimit_calculate_delay(&s->rate_limit, 0);
            if (ns > 0) {
                block_copy_task_end(task, -EAGAIN);
                g_free(task);
                qemu_co_sleep_ns_wakeable(&call_state->sleep,
                                          QEMU_CLOCK_REALTIME, ns);
                continue;
            }
        }

        ratelimit_calculate_delay(&s->rate_limit, task->req.bytes);

        trace_block_copy_process(s, task->req.offset);

        co_get_from_shres(s->mem, task->req.bytes);

        offset = task_end(task);
        bytes = end - offset;

        if (!aio && bytes) {
            aio = aio_task_pool_new(call_state->max_workers);
        }

        ret = block_copy_task_run(aio, task);
        if (ret < 0) {
            goto out;
        }
    }

out:
    if (aio) {
        aio_task_pool_wait_all(aio);

        /*
         * We are not really interested in -ECANCELED returned from
         * block_copy_task_run. If it fails, it means some task already failed
         * for a real reason, so let's return the first failure.
         * Still, assert that we don't rewrite failure by success.
         *
         * Note: ret may be positive here because of the block-status result.
         */
        assert(ret >= 0 || aio_task_pool_status(aio) < 0);
        ret = aio_task_pool_status(aio);

        aio_task_pool_free(aio);
    }

    return ret < 0 ? ret : found_dirty;
}

void block_copy_kick(BlockCopyCallState *call_state)
{
    qemu_co_sleep_wake(&call_state->sleep);
}

/*
 * block_copy_common
 *
 * Copy the requested region according to the dirty bitmap.
 * Collaborate with parallel block_copy requests: if they succeed, it will
 * help us. If they fail, we will retry not-copied regions. So, if we return
 * an error, it means that some I/O operation failed in the context of _this_
 * block_copy call, not some parallel operation.
 */
static int coroutine_fn GRAPH_RDLOCK
block_copy_common(BlockCopyCallState *call_state)
{
    int ret;
    BlockCopyState *s = call_state->s;

    qemu_co_mutex_lock(&s->lock);
    QLIST_INSERT_HEAD(&s->calls, call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    do {
        ret = block_copy_dirty_clusters(call_state);

        if (ret == 0 && !qatomic_read(&call_state->cancelled)) {
            WITH_QEMU_LOCK_GUARD(&s->lock) {
                /*
                 * Check that there is no task we still need to
                 * wait to complete
                 */
                ret = reqlist_wait_one(&s->reqs, call_state->offset,
                                       call_state->bytes, &s->lock);
                if (ret == 0) {
                    /*
                     * No pending tasks, but check again the bitmap in this
                     * same critical section, since a task might have failed
                     * between this and the critical section in
                     * block_copy_dirty_clusters().
                     *
                     * A reqlist_wait_one return value of 0 also means that it
                     * didn't release the lock. So, we are still in the same
                     * critical section, not interrupted by any concurrent
                     * access to state.
                     */
                    ret = bdrv_dirty_bitmap_next_dirty(s->copy_bitmap,
                                                       call_state->offset,
                                                       call_state->bytes) >= 0;
                }
            }
        }

        /*
         * We retry in two cases:
         * 1. Some progress was made
         *    Something was copied, which means that there were yield points
         *    and some new dirty bits may have appeared (due to failed parallel
         *    block-copy requests).
         * 2. We have waited for some intersecting block-copy request
         *    It may have failed and produced new dirty bits.
         */
    } while (ret > 0 && !qatomic_read(&call_state->cancelled));

    qatomic_store_release(&call_state->finished, true);

    if (call_state->cb) {
        call_state->cb(call_state->cb_opaque);
    }

    qemu_co_mutex_lock(&s->lock);
    QLIST_REMOVE(call_state, list);
    qemu_co_mutex_unlock(&s->lock);

    return ret;
}

static void coroutine_fn block_copy_async_co_entry(void *opaque)
{
    GRAPH_RDLOCK_GUARD();
    block_copy_common(opaque);
}

int coroutine_fn block_copy(BlockCopyState *s, int64_t start, int64_t bytes,
                            bool ignore_ratelimit, uint64_t timeout_ns,
                            BlockCopyAsyncCallbackFunc cb,
                            void *cb_opaque)
{
    int ret;
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = start,
        .bytes = bytes,
        .ignore_ratelimit = ignore_ratelimit,
        .max_workers = BLOCK_COPY_MAX_WORKERS,
        .cb = cb,
        .cb_opaque = cb_opaque,
    };

    ret = qemu_co_timeout(block_copy_async_co_entry, call_state, timeout_ns,
                          g_free);
    if (ret < 0) {
        assert(ret == -ETIMEDOUT);
        block_copy_call_cancel(call_state);
        /* call_state will be freed by running coroutine. */
        return ret;
    }

    ret = call_state->ret;
    g_free(call_state);

    return ret;
}

BlockCopyCallState *block_copy_async(BlockCopyState *s,
                                     int64_t offset, int64_t bytes,
                                     int max_workers, int64_t max_chunk,
                                     BlockCopyAsyncCallbackFunc cb,
                                     void *cb_opaque)
{
    BlockCopyCallState *call_state = g_new(BlockCopyCallState, 1);

    *call_state = (BlockCopyCallState) {
        .s = s,
        .offset = offset,
        .bytes = bytes,
        .max_workers = max_workers,
        .max_chunk = max_chunk,
        .cb = cb,
        .cb_opaque = cb_opaque,

        .co = qemu_coroutine_create(block_copy_async_co_entry, call_state),
    };

    qemu_coroutine_enter(call_state->co);

    return call_state;
}
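
/*
 * A usage sketch of the async API (hypothetical caller code; the callback
 * name and its wiring are assumptions for the example; max_chunk = 0 means
 * no per-call limit, per MIN_NON_ZERO in block_copy_task_create()):
 *
 *     static void my_copy_done(void *opaque)
 *     {
 *         // runs once the copy finishes (or is cancelled)
 *     }
 *
 *     BlockCopyCallState *cs;
 *     cs = block_copy_async(bcs, 0, bytes, BLOCK_COPY_MAX_WORKERS, 0,
 *                           my_copy_done, NULL);
 *     ...
 *     if (block_copy_call_finished(cs)) {
 *         bool error_is_read;
 *         int ret = block_copy_call_status(cs, &error_is_read);
 *         block_copy_call_free(cs);
 *     }
 *
 * An in-flight copy can be interrupted with block_copy_call_cancel(cs);
 * cancelling and finishing are racy, as noted below.
 */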

void block_copy_call_free(BlockCopyCallState *call_state)
{
    if (!call_state) {
        return;
    }

    assert(qatomic_read(&call_state->finished));
    g_free(call_state);
}

bool block_copy_call_finished(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->finished);
}

bool block_copy_call_succeeded(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret == 0;
}

bool block_copy_call_failed(BlockCopyCallState *call_state)
{
    return qatomic_load_acquire(&call_state->finished) &&
           !qatomic_read(&call_state->cancelled) &&
           call_state->ret < 0;
}

bool block_copy_call_cancelled(BlockCopyCallState *call_state)
{
    return qatomic_read(&call_state->cancelled);
}

int block_copy_call_status(BlockCopyCallState *call_state, bool *error_is_read)
{
    assert(qatomic_load_acquire(&call_state->finished));
    if (error_is_read) {
        *error_is_read = call_state->error_is_read;
    }
    return call_state->ret;
}

/*
 * Note that cancelling and finishing are racy.
 * A user can cancel a block-copy that is already finished.
 */
void block_copy_call_cancel(BlockCopyCallState *call_state)
{
    qatomic_set(&call_state->cancelled, true);
    block_copy_kick(call_state);
}

BdrvDirtyBitmap *block_copy_dirty_bitmap(BlockCopyState *s)
{
    return s->copy_bitmap;
}

int64_t block_copy_cluster_size(BlockCopyState *s)
{
    return s->cluster_size;
}

void block_copy_set_skip_unallocated(BlockCopyState *s, bool skip)
{
    qatomic_set(&s->skip_unallocated, skip);
}

void block_copy_set_speed(BlockCopyState *s, uint64_t speed)
{
    ratelimit_set_speed(&s->rate_limit, speed, BLOCK_COPY_SLICE_TIME);

    /*
     * Note: it would be good to kick all call states from here, but that
     * should be done only from a coroutine, to avoid crashing if the
     * s->calls list changes while entering one call. So for now, the only
     * user of this function kicks its only call_state by hand.
     */
}