/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 * Copyright (c) 2019 Virtuozzo International GmbH.
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "block/block_backup.h"
#include "block/block-copy.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"
#include "qemu/error-report.h"

#include "block/backup-top.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *backup_top;
    BlockDriverState *source_bs;

    BdrvDirtyBitmap *sync_bitmap;

    MirrorSyncMode sync_mode;
    BitmapSyncMode bitmap_mode;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    uint64_t len;
    uint64_t bytes_read;
    int64_t cluster_size;

    BlockCopyState *bcs;
} BackupBlockJob;

static const BlockJobDriver backup_job_driver;

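/* block-copy progress callback: account copied bytes for rate limiting */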
static void backup_progress_bytes_callback(int64_t bytes, void *opaque)
{
    BackupBlockJob *s = opaque;

    s->bytes_read += bytes;
}

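/*
 * Copy the cluster-aligned range covering @offset/@bytes from the source to
 * the target, delegating the actual copying to block-copy.
 */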
static int coroutine_fn backup_do_cow(BackupBlockJob *job,
                                      int64_t offset, uint64_t bytes,
                                      bool *error_is_read)
{
    int ret = 0;
    int64_t start, end; /* bytes */

    start = QEMU_ALIGN_DOWN(offset, job->cluster_size);
    end = QEMU_ALIGN_UP(bytes + offset, job->cluster_size);

    trace_backup_do_cow_enter(job, start, offset, bytes);

    ret = block_copy(job->bcs, start, end - start, error_is_read);

    trace_backup_do_cow_return(job, offset, bytes, ret);

    return ret;
}

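/*
 * Reconcile the caller's sync bitmap with its successor when the job ends:
 * either let the successor (which tracked guest writes made during the job)
 * take over, or merge it back into the original so no dirty bits are lost.
 */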
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    bool sync = (((ret == 0) || (job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS))
                 && (job->bitmap_mode != BITMAP_SYNC_MODE_NEVER));

    if (sync) {
        /*
         * We succeeded, or we always intended to sync the bitmap.
         * Delete this bitmap and install the child.
         */
        bm = bdrv_dirty_bitmap_abdicate(job->sync_bitmap, NULL);
    } else {
        /*
         * We failed, or we never intended to sync the bitmap anyway.
         * Merge the successor back into the parent, keeping all data.
         */
        bm = bdrv_reclaim_dirty_bitmap(job->sync_bitmap, NULL);
    }

    assert(bm);

    if (ret < 0 && job->bitmap_mode == BITMAP_SYNC_MODE_ALWAYS) {
        /* If we failed and synced, merge in the bits we didn't copy: */
        bdrv_dirty_bitmap_merge_internal(bm, block_copy_dirty_bitmap(job->bcs),
                                         NULL, true);
    }
}

static void backup_commit(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

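/* Remove the backup-top filter node that was inserted above the source */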
static void backup_clean(Job *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    bdrv_backup_top_drop(s->backup_top);
}

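/*
 * Re-mark the whole device dirty in the block-copy bitmap so that every
 * cluster is copied out again on the next guest write.  Only valid for
 * sync=none jobs (used e.g. by the replication driver at checkpoints).
 */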
void backup_do_checkpoint(BlockJob *job, Error **errp)
{
    BackupBlockJob *backup_job = container_of(job, BackupBlockJob, common);

    assert(block_job_driver(job) == &backup_job_driver);

    if (backup_job->sync_mode != MIRROR_SYNC_MODE_NONE) {
        error_setg(errp, "The backup job only supports block checkpoint in"
                   " sync=none mode");
        return;
    }

    bdrv_set_dirty_bitmap(block_copy_dirty_bitmap(backup_job->bcs), 0,
                          backup_job->len);
}

static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&job->common, job->on_target_error,
                                      false, error);
    }
}

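/* Throttle according to the rate limit; returns true if the job was cancelled */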
static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    uint64_t delay_ns;

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    /*
     * We need to yield even for delay_ns = 0 so that bdrv_drain_all() can
     * return. Without a yield, the VM would not reboot.
     */
    delay_ns = block_job_ratelimit_get_delay(&job->common, job->bytes_read);
    job->bytes_read = 0;
    job_sleep_ns(&job->common.job, delay_ns);

    if (job_is_cancelled(&job->common.job)) {
        return true;
    }

    return false;
}

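/*
 * Walk the block-copy dirty bitmap and copy every dirty cluster, retrying or
 * giving up on a cluster according to the configured on-error policy.
 */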
static int coroutine_fn backup_loop(BackupBlockJob *job)
{
    bool error_is_read;
    int64_t offset;
    BdrvDirtyBitmapIter *bdbi;
    int ret = 0;

    bdbi = bdrv_dirty_iter_new(block_copy_dirty_bitmap(job->bcs));
    while ((offset = bdrv_dirty_iter_next(bdbi)) != -1) {
        do {
            if (yield_and_check(job)) {
                goto out;
            }
            ret = backup_do_cow(job, offset, job->cluster_size, &error_is_read);
            if (ret < 0 && backup_error_action(job, error_is_read, -ret) ==
                           BLOCK_ERROR_ACTION_REPORT)
            {
                goto out;
            }
        } while (ret < 0);
    }

out:
    bdrv_dirty_iter_free(bdbi);
    return ret;
}

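/*
 * Seed the block-copy dirty bitmap: from the user's bitmap for sync=bitmap,
 * otherwise fully set (for sync=top, unallocated areas are cleared again
 * early in backup_run()), and set the job's progress estimate from the
 * resulting dirty count.
 */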
static void backup_init_bcs_bitmap(BackupBlockJob *job)
{
    bool ret;
    uint64_t estimate;
    BdrvDirtyBitmap *bcs_bitmap = block_copy_dirty_bitmap(job->bcs);

    if (job->sync_mode == MIRROR_SYNC_MODE_BITMAP) {
        ret = bdrv_dirty_bitmap_merge_internal(bcs_bitmap, job->sync_bitmap,
                                               NULL, true);
        assert(ret);
    } else {
        if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
            /*
             * We can't hog the coroutine to initialize this thoroughly.
             * Set a flag and resume work when we are able to yield safely.
             */
            block_copy_set_skip_unallocated(job->bcs, true);
        }
        bdrv_set_dirty_bitmap(bcs_bitmap, 0, job->len);
    }

    estimate = bdrv_get_dirty_count(bcs_bitmap);
    job_progress_set_remaining(&job->common.job, estimate);
}

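/*
 * Main job coroutine.  For sync=top, first drop unallocated clusters from the
 * copy bitmap; for sync=none, just yield and let the backup-top filter handle
 * copy-before-write; otherwise copy all dirty clusters via backup_loop().
 */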
static int coroutine_fn backup_run(Job *job, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common.job);
    int ret = 0;

    backup_init_bcs_bitmap(s);

    if (s->sync_mode == MIRROR_SYNC_MODE_TOP) {
        int64_t offset = 0;
        int64_t count;

        for (offset = 0; offset < s->len; ) {
            if (yield_and_check(s)) {
                ret = -ECANCELED;
                goto out;
            }

            ret = block_copy_reset_unallocated(s->bcs, offset, &count);
            if (ret < 0) {
                goto out;
            }

            offset += count;
        }
        block_copy_set_skip_unallocated(s->bcs, false);
    }

    if (s->sync_mode == MIRROR_SYNC_MODE_NONE) {
        /*
         * All bits are set in bcs bitmap to allow any cluster to be copied.
         * This does not actually require them to be copied.
         */
        while (!job_is_cancelled(job)) {
            /*
             * Yield until the job is cancelled.  We just let the backup-top
             * filter service copy-before-write requests.
             */
            job_yield(job);
        }
    } else {
        ret = backup_loop(s);
    }

out:
    return ret;
}

static const BlockJobDriver backup_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(BackupBlockJob),
        .job_type               = JOB_TYPE_BACKUP,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .run                    = backup_run,
        .commit                 = backup_commit,
        .abort                  = backup_abort,
        .clean                  = backup_clean,
    }
};

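/*
 * Pick the job's cluster size: at least BACKUP_CLUSTER_SIZE_DEFAULT, and no
 * smaller than the target's own cluster size when that can be determined.
 */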
static int64_t backup_calculate_cluster_size(BlockDriverState *target,
                                             Error **errp)
{
    int ret;
    BlockDriverInfo bdi;
    bool target_does_cow = bdrv_backing_chain_next(target);

    /*
     * If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible.
     */
    ret = bdrv_get_info(target, &bdi);
    if (ret == -ENOTSUP && !target_does_cow) {
        /* Cluster size is not defined */
        warn_report("The target block device doesn't provide "
                    "information about the block size and it doesn't have a "
                    "backing file. The default block size of %u bytes is "
                    "used. If the actual block size of the target exceeds "
                    "this default, the backup may be unusable",
                    BACKUP_CLUSTER_SIZE_DEFAULT);
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    } else if (ret < 0 && !target_does_cow) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        return ret;
    } else if (ret < 0 && target_does_cow) {
        /* Not fatal; just trudge on ahead. */
        return BACKUP_CLUSTER_SIZE_DEFAULT;
    }

    return MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
}

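/*
 * Create (but do not start) a backup block job: validate the arguments,
 * create a successor for the sync bitmap if one is used, insert the
 * backup-top filter above @bs (which performs copy-before-write into
 * @target via block-copy), and finally create the job itself.
 */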
BlockJob *backup_job_create(const char *job_id, BlockDriverState *bs,
                            BlockDriverState *target, int64_t speed,
                            MirrorSyncMode sync_mode, BdrvDirtyBitmap *sync_bitmap,
                            BitmapSyncMode bitmap_mode,
                            bool compress,
                            const char *filter_node_name,
                            BlockdevOnError on_source_error,
                            BlockdevOnError on_target_error,
                            int creation_flags,
                            BlockCompletionFunc *cb, void *opaque,
                            JobTxn *txn, Error **errp)
{
    int64_t len, target_len;
    BackupBlockJob *job = NULL;
    int64_t cluster_size;
    BdrvRequestFlags write_flags;
    BlockDriverState *backup_top = NULL;
    BlockCopyState *bcs = NULL;

    assert(bs);
    assert(target);

    /* QMP interface protects us from these cases */
    assert(sync_mode != MIRROR_SYNC_MODE_INCREMENTAL);
    assert(sync_bitmap || sync_mode != MIRROR_SYNC_MODE_BITMAP);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return NULL;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return NULL;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (compress && !bdrv_supports_compressed_writes(target)) {
        error_setg(errp, "Compression is not supported for this drive %s",
                   bdrv_get_device_name(target));
        return NULL;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return NULL;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return NULL;
    }

    if (sync_bitmap) {
        /* If we need to write to this bitmap, check that we can: */
        if (bitmap_mode != BITMAP_SYNC_MODE_NEVER &&
            bdrv_dirty_bitmap_check(sync_bitmap, BDRV_BITMAP_DEFAULT, errp)) {
            return NULL;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(sync_bitmap, errp) < 0) {
            return NULL;
        }
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "Unable to get length for '%s'",
                         bdrv_get_device_or_node_name(bs));
        goto error;
    }

    target_len = bdrv_getlength(target);
    if (target_len < 0) {
        error_setg_errno(errp, -target_len, "Unable to get length for '%s'",
                         bdrv_get_device_or_node_name(target));
        goto error;
    }

    if (target_len != len) {
        error_setg(errp, "Source and target image have different sizes");
        goto error;
    }

    cluster_size = backup_calculate_cluster_size(target, errp);
    if (cluster_size < 0) {
        goto error;
    }

    /*
     * If the source is in the backing chain of the target, assume that the
     * target is going to be used for "image fleecing", i.e. it should
     * represent a kind of snapshot of the source at backup-start point in
     * time, and that the target is going to be read by somebody (for
     * example, used as an NBD export) while the backup job runs.
     *
     * In this case, we need to add the BDRV_REQ_SERIALISING write flag to
     * avoid intersection of backup writes and third-party reads from the
     * target; otherwise, when reading from the target, we may occasionally
     * read data already updated by the guest.
     *
     * For more information see commit f8d59dfb40bb and test
     * tests/qemu-iotests/222
     */
    write_flags = (bdrv_chain_contains(target, bs) ? BDRV_REQ_SERIALISING : 0) |
                  (compress ? BDRV_REQ_WRITE_COMPRESSED : 0);

    backup_top = bdrv_backup_top_append(bs, target, filter_node_name,
                                        cluster_size, write_flags, &bcs, errp);
    if (!backup_top) {
        goto error;
    }

    /* job->len is fixed, so we can't allow resize */
    job = block_job_create(job_id, &backup_job_driver, txn, backup_top,
                           0, BLK_PERM_ALL,
                           speed, creation_flags, cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->backup_top = backup_top;
    job->source_bs = bs;
    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_bitmap;
    job->bitmap_mode = bitmap_mode;
    job->bcs = bcs;
    job->cluster_size = cluster_size;
    job->len = len;

    block_copy_set_progress_callback(bcs, backup_progress_bytes_callback, job);
    block_copy_set_progress_meter(bcs, &job->common.job.progress);

    /* Required permissions are already taken by backup-top target */
    block_job_add_bdrv(&job->common, "target", target, 0, BLK_PERM_ALL,
                       &error_abort);

    return &job->common;

error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(sync_bitmap, NULL);
    }
    if (backup_top) {
        bdrv_backup_top_drop(backup_top);
    }

    return NULL;
}