/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "block/block-copy.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#include "block/backup-top.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *backup_top;
    BlockDriverState *source_bs;

    BdrvDirtyBitmap *sync_bitmap;

    MirrorSyncMode sync_mode;
    BitmapSyncMode bitmap_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;

    BlockCopyState *bcs;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
{
    BackupBlockJob *s = opaque;

    s->bytes_read += bytes;
}

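/*
 * Copy the clusters covering [offset, offset + bytes) from the source to the
 * target.  The range is expanded to cluster boundaries; the actual copying is
 * delegated to the shared block-copy code.
 */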
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read)
{
    int ret = 0;
    int64_t start, end; /* bytes */

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    ret = block_copy(job->bcs, start, end - start, error_is_read);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    return ret;
}

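/*
 * Finalize the sync bitmap when the job ends: depending on the job result and
 * the bitmap sync mode, either let the successor replace the frozen bitmap or
 * merge the successor back into it.  For a failed sync in "always" mode, the
 * clusters that were left uncopied are merged back in as well.
 */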
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS))
                 && (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));

    if (sync) {
        /*
         * We succeeded, or we always intended to sync the bitmap.
         * Delete this bitmap and install the child.
         */
        bm = bdrv_dirty_bitmap_abdicate(job->sync_bitmap, NULL);
    } else {
        /*
         * We failed, or we never intended to sync the bitmap anyway.
         * Merge the successor back into the parent, keeping all data.
         */
        bm = bdrv_reclaim_dirty_bitmap(job->sync_bitmap, NULL);
    }

    assert(bm);

    if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
        /* If we failed and synced, merge in the bits we didn't copy: */
        bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs),
                                         NULL, true);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

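/* Remove the backup-top filter node that was inserted above the source. */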
static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    bdrv_backup_top_drop(s->backup_top);
}

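/*
 * Mark all clusters dirty again in the block-copy bitmap, so that clusters
 * already copied out will be copied again on the next guest write.  Only
 * supported in sync=none mode.
 */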
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0,
                          backup_job->len);
}

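/*
 * Translate an I/O error into the action configured via on-source-error or
 * on-target-error.
 */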
static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

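/*
 * Throttle according to the configured speed limit and give other coroutines
 * a chance to run.  Returns true if the job has been cancelled.
 */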
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /*
     * We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot.
     */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

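/*
 * Main copy loop: iterate over the dirty clusters recorded in the block-copy
 * bitmap and copy them one cluster at a time, retrying a failed cluster
 * unless the configured error action is to report the error.
 */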
static int coroutine_fn backup_loop(BackupBlockJob *job)
{
    bool error_is_read;
    int64_t offset;
    BdrvDirtyBitmapIter *bdbi;
    int ret = 0;

    bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs));
    while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
        do {
            if (yield_and_check(job)) {
                goto out;
            }
            ret = backup_do_cow(job, offset, job->cluster_size, &error_is_read);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                BLOCK_ERROR_ACTION_REPORT)
            {
                goto out;
            }
        } while (ret < 0);
    }

 out:
    bdrv_dirty_iter_free(bdbi);
    return ret;
}

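/*
 * Seed the block-copy dirty bitmap according to the sync mode: for
 * sync=bitmap merge in the user's bitmap, otherwise mark the whole device
 * dirty (for sync=top the unallocated parts are cleared later in backup_run),
 * then publish the initial progress estimate.
 */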
static void backup_init_bcs_bitmap(BackupBlockJob *job)
{
    bool ret;
    uint64_t estimate;
    BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs);

    if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
        ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap,
                                               NULL, true);
        assert(ret);
    } else {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
            /*
             * We can't hog the coroutine to initialize this thoroughly.
             * Set a flag and resume work when we are able to yield safely.
             */
            block_copy_set_skip_unallocated(job->bcs, true);
        }
        bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len);
    }

    estimate = bdrv_get_dirty_count(bcs_bitmap);
    job_progress_set_remaining(&job->common.job, estimate);
}

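/*
 * The job's main coroutine.  For sync=top, unallocated clusters are first
 * cleared from the copy bitmap; for sync=none, the job just yields until it
 * is cancelled while the backup-top filter copies clusters ahead of guest
 * writes; otherwise the copy loop does the work.
 */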
static int coroutine_fn backup_run(Job *job, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    int ret = 0;

    backup_init_bcs_bitmap(s);

    if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
        int64_t offset = 0;
        int64_t count;

        for (offset = 0; offset < s->len; ) {
            if (yield_and_check(s)) {
                ret = -ECANCELED;
                goto out;
            }

            ret = block_copy_reset_unallocated(s->bcs, offset, &count);
            if (ret < 0) {
                goto out;
            }

            offset += count;
        }
        block_copy_set_skip_unallocated(s->bcs, false);
    }

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /*
         * All bits are set in the bcs bitmap to allow any cluster to be
         * copied; this does not mean they actually need to be copied.
         */
        while (!job_is_cancelled(job)) {
            /*
             * Yield until the job is cancelled.  We just let the backup-top
             * filter service CoW requests.
             */
            job_yield(job);
        }
    } else {
        ret = backup_loop(s);
    }

 out:
    return ret;
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size = sizeof(BackupBlockJob),
        .job_type = JOB_TYPE_BACKUP,
        .free = block_job_free,
        .user_resume = block_job_user_resume,
        .run = backup_run,
        .commit = backup_commit,
        .abort = backup_abort,
        .clean = backup_clean,
    }
};

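/*
 * Choose the copy granularity: at least BACKUP_CLUSTER_SIZE_DEFAULT, raised
 * to the target's cluster size when that can be determined.
 */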
static int64_t backup_calculate_cluster_size(BlockDriverState *target,
                                             Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

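/*
 * Create a backup block job (not yet started): validate source and target,
 * set up the sync bitmap successor if one was given, insert the backup-top
 * filter with its shared block-copy state, and wire up progress reporting.
 * Returns the new job, or NULL with errp set on failure.
 */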
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *target, int64_t speed,
                  MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                  BitmapSyncMode bitmap_mode,
                  bool compress,
                  const char *filter_node_name,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  int creation_flags,
                  BlockCompletionFunc *cb, void *opaque,
                  JobTxn *txn, Error **errp)
{
    int64_t len, target_len;
    BackupBlockJob *job = NULL;
    int64_t cluster_size;
    BdrvRequestFlags write_flags;
    BlockDriverState *backup_top = NULL;
    BlockCopyState *bcs = NULL;

    assert(bs);
    assert(target);

    /* QMP interface protects us from these cases */
    assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
    assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && !bdrv_supports_compressed_writes(target)) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_bitmap) {
        /* If we need to write to this bitmap, check that we can: */
        if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
            bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(sync_bitmap, errp) < 0) {
            return NULL;
        }
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "Unable to get length for '%s'",
                         bdrv_get_device_or_node_name(bs));
        goto error;
    }

    target_len = bdrv_getlength(target);
    if (target_len < 0) {
        error_setg_errno(errp, -target_len, "Unable to get length for '%s'",
                         bdrv_get_device_or_node_name(target));
        goto error;
    }

    if (target_len != len) {
        error_setg(errp, "Source and target image have different sizes");
        goto error;
    }

    cluster_size = backup_calculate_cluster_size(target, errp);
    if (cluster_size < 0) {
        goto error;
    }

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it represents a
     * kind of snapshot of the source at backup-start time, and will be read
     * by somebody else (for example, as an NBD export) while the backup job
     * runs.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise a read from the target may occasionally return data
     * that the guest has already updated.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    write_flags = (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
                  (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    backup_top = bdrv_backup_top_append(bs, target, filter_node_name,
                                        cluster_size, write_flags, &bcs, errp);
    if (!backup_top) {
        goto error;
    }

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, backup_top,
                           0, BLK_PERM_ALL,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->backup_top = backup_top;
    job->source_bs = bs;
    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_bitmap;
    job->bitmap_mode = bitmap_mode;
    job->bcs = bcs;
    job->cluster_size = cluster_size;
    job->len = len;

    block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job);
    block_copy_set_progress_meter(bcs, &job->common.job.progress);

    /* Required permissions are already taken by backup-top target */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    return &job->common;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(sync_bitmap, NULL);
    }
    if (backup_top) {
        bdrv_backup_top_drop(backup_top);
    }

    return NULL;
}