Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 1 | /* |
| 2 | * Image streaming |
| 3 | * |
| 4 | * Copyright IBM, Corp. 2011 |
| 5 | * |
| 6 | * Authors: |
| 7 | * Stefan Hajnoczi <stefanha@linux.vnet.ibm.com> |
| 8 | * |
| 9 | * This work is licensed under the terms of the GNU LGPL, version 2 or later. |
| 10 | * See the COPYING.LIB file in the top-level directory. |
| 11 | * |
| 12 | */ |
| 13 | |
Peter Maydell | 80c71a2 | 2016-01-18 18:01:42 +0000 | [diff] [blame] | 14 | #include "qemu/osdep.h" |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 15 | #include "trace.h" |
Paolo Bonzini | 737e150 | 2012-12-17 18:19:44 +0100 | [diff] [blame] | 16 | #include "block/block_int.h" |
John Snow | c87621e | 2016-10-27 12:07:00 -0400 | [diff] [blame] | 17 | #include "block/blockjob_int.h" |
Markus Armbruster | da34e65 | 2016-03-14 09:01:28 +0100 | [diff] [blame] | 18 | #include "qapi/error.h" |
Markus Armbruster | cc7a8ea | 2015-03-17 17:22:46 +0100 | [diff] [blame] | 19 | #include "qapi/qmp/qerror.h" |
Paolo Bonzini | 6ef228f | 2012-05-09 16:09:46 +0200 | [diff] [blame] | 20 | #include "qemu/ratelimit.h" |
Max Reitz | 373340b | 2015-10-19 17:53:22 +0200 | [diff] [blame] | 21 | #include "sysemu/block-backend.h" |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 22 | |
enum {
    /*
     * Size of data buffer for populating the image file. This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    STREAM_BUFFER_SIZE = 512 * 1024, /* in bytes */
};
| 31 | |
Stefan Hajnoczi | 5094a6c | 2012-01-18 14:40:45 +0000 | [diff] [blame] | 32 | #define SLICE_TIME 100000000ULL /* ns */ |
| 33 | |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 34 | typedef struct StreamBlockJob { |
| 35 | BlockJob common; |
Stefan Hajnoczi | 5094a6c | 2012-01-18 14:40:45 +0000 | [diff] [blame] | 36 | RateLimit limit; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 37 | BlockDriverState *base; |
Paolo Bonzini | 1d80909 | 2012-09-28 17:22:59 +0200 | [diff] [blame] | 38 | BlockdevOnError on_error; |
Jeff Cody | 13d8cc5 | 2014-06-25 15:40:11 -0400 | [diff] [blame] | 39 | char *backing_file_str; |
Alberto Garcia | 61b49e4 | 2016-10-28 10:08:10 +0300 | [diff] [blame] | 40 | int bs_flags; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 41 | } StreamBlockJob; |
| 42 | |
Kevin Wolf | 03e35d8 | 2016-04-12 15:15:49 +0200 | [diff] [blame] | 43 | static int coroutine_fn stream_populate(BlockBackend *blk, |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 44 | int64_t sector_num, int nb_sectors, |
| 45 | void *buf) |
| 46 | { |
| 47 | struct iovec iov = { |
| 48 | .iov_base = buf, |
| 49 | .iov_len = nb_sectors * BDRV_SECTOR_SIZE, |
| 50 | }; |
| 51 | QEMUIOVector qiov; |
| 52 | |
| 53 | qemu_iovec_init_external(&qiov, &iov, 1); |
| 54 | |
| 55 | /* Copy-on-read the unallocated clusters */ |
Kevin Wolf | 03e35d8 | 2016-04-12 15:15:49 +0200 | [diff] [blame] | 56 | return blk_co_preadv(blk, sector_num * BDRV_SECTOR_SIZE, qiov.size, &qiov, |
| 57 | BDRV_REQ_COPY_ON_READ); |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 58 | } |
| 59 | |
Stefan Hajnoczi | f3e69be | 2014-10-21 12:03:57 +0100 | [diff] [blame] | 60 | typedef struct { |
| 61 | int ret; |
| 62 | bool reached_end; |
| 63 | } StreamCompleteData; |
| 64 | |
/*
 * Main-loop completion callback for the streaming job.
 *
 * On success (not cancelled, whole image covered, no I/O error) it rewrites
 * the image's backing-file header fields and drops the now-redundant
 * intermediate nodes from the backing chain.  In all cases it restores the
 * original read-only open state if stream_start() had reopened the image
 * read-write, then finalizes the job.
 */
static void stream_complete(BlockJob *job, void *opaque)
{
    StreamBlockJob *s = container_of(job, StreamBlockJob, common);
    StreamCompleteData *data = opaque;
    BlockDriverState *bs = blk_bs(job->blk);
    BlockDriverState *base = s->base;
    Error *local_err = NULL;

    if (!block_job_is_cancelled(&s->common) && data->reached_end &&
        data->ret == 0) {
        const char *base_id = NULL, *base_fmt = NULL;
        if (base) {
            /* Record the user-supplied name (may differ from base's actual
             * filename, e.g. a relative path) in the image header */
            base_id = s->backing_file_str;
            if (base->drv) {
                base_fmt = base->drv->format_name;
            }
        }
        /* Update the on-disk header first, then the in-memory graph */
        data->ret = bdrv_change_backing_file(bs, base_id, base_fmt);
        bdrv_set_backing_hd(bs, base, &local_err);
        if (local_err) {
            error_report_err(local_err);
            data->ret = -EPERM;
            goto out;
        }
    }

out:
    /* Reopen the image back in read-only mode if necessary */
    if (s->bs_flags != bdrv_get_flags(bs)) {
        /* Give up write permissions before making it read-only */
        blk_set_perm(job->blk, 0, BLK_PERM_ALL, &error_abort);
        bdrv_reopen(bs, s->bs_flags, NULL);
    }

    g_free(s->backing_file_str);
    block_job_completed(&s->common, data->ret);
    g_free(data);
}
| 103 | |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 104 | static void coroutine_fn stream_run(void *opaque) |
| 105 | { |
| 106 | StreamBlockJob *s = opaque; |
Stefan Hajnoczi | f3e69be | 2014-10-21 12:03:57 +0100 | [diff] [blame] | 107 | StreamCompleteData *data; |
Kevin Wolf | 03e35d8 | 2016-04-12 15:15:49 +0200 | [diff] [blame] | 108 | BlockBackend *blk = s->common.blk; |
| 109 | BlockDriverState *bs = blk_bs(blk); |
Marcelo Tosatti | c8c3080 | 2012-01-18 14:40:53 +0000 | [diff] [blame] | 110 | BlockDriverState *base = s->base; |
Alberto Garcia | 6578629 | 2016-03-21 15:47:25 +0200 | [diff] [blame] | 111 | int64_t sector_num = 0; |
| 112 | int64_t end = -1; |
Sascha Silbe | f14a39c | 2016-06-28 17:28:41 +0200 | [diff] [blame] | 113 | uint64_t delay_ns = 0; |
Paolo Bonzini | 1d80909 | 2012-09-28 17:22:59 +0200 | [diff] [blame] | 114 | int error = 0; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 115 | int ret = 0; |
Anthony Liguori | 04120e3 | 2012-05-10 09:10:42 -0500 | [diff] [blame] | 116 | int n = 0; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 117 | void *buf; |
| 118 | |
Kevin Wolf | 760e006 | 2015-06-17 14:55:21 +0200 | [diff] [blame] | 119 | if (!bs->backing) { |
Alberto Garcia | 6578629 | 2016-03-21 15:47:25 +0200 | [diff] [blame] | 120 | goto out; |
Max Reitz | f4a193e | 2013-11-13 20:37:58 +0100 | [diff] [blame] | 121 | } |
| 122 | |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 123 | s->common.len = bdrv_getlength(bs); |
| 124 | if (s->common.len < 0) { |
Alberto Garcia | 6578629 | 2016-03-21 15:47:25 +0200 | [diff] [blame] | 125 | ret = s->common.len; |
| 126 | goto out; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 127 | } |
| 128 | |
| 129 | end = s->common.len >> BDRV_SECTOR_BITS; |
| 130 | buf = qemu_blockalign(bs, STREAM_BUFFER_SIZE); |
| 131 | |
| 132 | /* Turn on copy-on-read for the whole block device so that guest read |
| 133 | * requests help us make progress. Only do this when copying the entire |
| 134 | * backing chain since the copy-on-read operation does not take base into |
| 135 | * account. |
| 136 | */ |
| 137 | if (!base) { |
| 138 | bdrv_enable_copy_on_read(bs); |
| 139 | } |
| 140 | |
| 141 | for (sector_num = 0; sector_num < end; sector_num += n) { |
Paolo Bonzini | f9749f2 | 2012-05-08 16:52:00 +0200 | [diff] [blame] | 142 | bool copy; |
Paolo Bonzini | 4513eaf | 2012-05-08 16:51:45 +0200 | [diff] [blame] | 143 | |
Paolo Bonzini | 4513eaf | 2012-05-08 16:51:45 +0200 | [diff] [blame] | 144 | /* Note that even when no rate limit is applied we need to yield |
Kevin Wolf | c57b665 | 2012-11-13 16:35:13 +0100 | [diff] [blame] | 145 | * with no pending I/O here so that bdrv_drain_all() returns. |
Paolo Bonzini | 4513eaf | 2012-05-08 16:51:45 +0200 | [diff] [blame] | 146 | */ |
Alex Bligh | 7483d1e | 2013-08-21 16:03:05 +0100 | [diff] [blame] | 147 | block_job_sleep_ns(&s->common, QEMU_CLOCK_REALTIME, delay_ns); |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 148 | if (block_job_is_cancelled(&s->common)) { |
| 149 | break; |
| 150 | } |
| 151 | |
Stefan Weil | c3e4f43 | 2013-09-22 08:19:10 +0200 | [diff] [blame] | 152 | copy = false; |
| 153 | |
Paolo Bonzini | bdad13b | 2013-09-04 19:00:22 +0200 | [diff] [blame] | 154 | ret = bdrv_is_allocated(bs, sector_num, |
| 155 | STREAM_BUFFER_SIZE / BDRV_SECTOR_SIZE, &n); |
Paolo Bonzini | f9749f2 | 2012-05-08 16:52:00 +0200 | [diff] [blame] | 156 | if (ret == 1) { |
| 157 | /* Allocated in the top, no need to copy. */ |
Paolo Bonzini | d663640 | 2013-09-04 19:00:25 +0200 | [diff] [blame] | 158 | } else if (ret >= 0) { |
Paolo Bonzini | f9749f2 | 2012-05-08 16:52:00 +0200 | [diff] [blame] | 159 | /* Copy if allocated in the intermediate images. Limit to the |
| 160 | * known-unallocated area [sector_num, sector_num+n). */ |
Kevin Wolf | 760e006 | 2015-06-17 14:55:21 +0200 | [diff] [blame] | 161 | ret = bdrv_is_allocated_above(backing_bs(bs), base, |
Paolo Bonzini | 4f57863 | 2013-09-04 19:00:24 +0200 | [diff] [blame] | 162 | sector_num, n, &n); |
Stefan Hajnoczi | 571cd9d | 2012-08-28 15:26:48 +0100 | [diff] [blame] | 163 | |
| 164 | /* Finish early if end of backing file has been reached */ |
| 165 | if (ret == 0 && n == 0) { |
| 166 | n = end - sector_num; |
| 167 | } |
| 168 | |
Paolo Bonzini | f9749f2 | 2012-05-08 16:52:00 +0200 | [diff] [blame] | 169 | copy = (ret == 1); |
| 170 | } |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 171 | trace_stream_one_iteration(s, sector_num, n, ret); |
Stefan Weil | c3e4f43 | 2013-09-22 08:19:10 +0200 | [diff] [blame] | 172 | if (copy) { |
Kevin Wolf | 03e35d8 | 2016-04-12 15:15:49 +0200 | [diff] [blame] | 173 | ret = stream_populate(blk, sector_num, n, buf); |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 174 | } |
| 175 | if (ret < 0) { |
Paolo Bonzini | 1d80909 | 2012-09-28 17:22:59 +0200 | [diff] [blame] | 176 | BlockErrorAction action = |
Kevin Wolf | 81e254d | 2016-04-18 11:36:38 +0200 | [diff] [blame] | 177 | block_job_error_action(&s->common, s->on_error, true, -ret); |
Wenchao Xia | a589569 | 2014-06-18 08:43:30 +0200 | [diff] [blame] | 178 | if (action == BLOCK_ERROR_ACTION_STOP) { |
Paolo Bonzini | 1d80909 | 2012-09-28 17:22:59 +0200 | [diff] [blame] | 179 | n = 0; |
| 180 | continue; |
| 181 | } |
| 182 | if (error == 0) { |
| 183 | error = ret; |
| 184 | } |
Wenchao Xia | a589569 | 2014-06-18 08:43:30 +0200 | [diff] [blame] | 185 | if (action == BLOCK_ERROR_ACTION_REPORT) { |
Paolo Bonzini | 1d80909 | 2012-09-28 17:22:59 +0200 | [diff] [blame] | 186 | break; |
| 187 | } |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 188 | } |
Marcelo Tosatti | c8c3080 | 2012-01-18 14:40:53 +0000 | [diff] [blame] | 189 | ret = 0; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 190 | |
| 191 | /* Publish progress */ |
| 192 | s->common.offset += n * BDRV_SECTOR_SIZE; |
Sascha Silbe | f14a39c | 2016-06-28 17:28:41 +0200 | [diff] [blame] | 193 | if (copy && s->common.speed) { |
| 194 | delay_ns = ratelimit_calculate_delay(&s->limit, n); |
| 195 | } |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 196 | } |
| 197 | |
| 198 | if (!base) { |
| 199 | bdrv_disable_copy_on_read(bs); |
| 200 | } |
| 201 | |
Paolo Bonzini | 1d80909 | 2012-09-28 17:22:59 +0200 | [diff] [blame] | 202 | /* Do not remove the backing file if an error was there but ignored. */ |
| 203 | ret = error; |
| 204 | |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 205 | qemu_vfree(buf); |
Stefan Hajnoczi | f3e69be | 2014-10-21 12:03:57 +0100 | [diff] [blame] | 206 | |
Alberto Garcia | 6578629 | 2016-03-21 15:47:25 +0200 | [diff] [blame] | 207 | out: |
Stefan Hajnoczi | f3e69be | 2014-10-21 12:03:57 +0100 | [diff] [blame] | 208 | /* Modify backing chain and close BDSes in main loop */ |
| 209 | data = g_malloc(sizeof(*data)); |
| 210 | data->ret = ret; |
| 211 | data->reached_end = sector_num == end; |
| 212 | block_job_defer_to_main_loop(&s->common, stream_complete, data); |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 213 | } |
| 214 | |
Stefan Hajnoczi | 882ec7c | 2012-04-25 16:51:02 +0100 | [diff] [blame] | 215 | static void stream_set_speed(BlockJob *job, int64_t speed, Error **errp) |
Stefan Hajnoczi | 5094a6c | 2012-01-18 14:40:45 +0000 | [diff] [blame] | 216 | { |
| 217 | StreamBlockJob *s = container_of(job, StreamBlockJob, common); |
| 218 | |
Stefan Hajnoczi | 882ec7c | 2012-04-25 16:51:02 +0100 | [diff] [blame] | 219 | if (speed < 0) { |
Markus Armbruster | c6bd8c7 | 2015-03-17 11:54:50 +0100 | [diff] [blame] | 220 | error_setg(errp, QERR_INVALID_PARAMETER, "speed"); |
Stefan Hajnoczi | 9e6636c | 2012-04-25 16:51:01 +0100 | [diff] [blame] | 221 | return; |
Stefan Hajnoczi | 5094a6c | 2012-01-18 14:40:45 +0000 | [diff] [blame] | 222 | } |
Paolo Bonzini | 6ef228f | 2012-05-09 16:09:46 +0200 | [diff] [blame] | 223 | ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME); |
Stefan Hajnoczi | 5094a6c | 2012-01-18 14:40:45 +0000 | [diff] [blame] | 224 | } |
| 225 | |
Fam Zheng | 3fc4b10 | 2013-10-08 17:29:38 +0800 | [diff] [blame] | 226 | static const BlockJobDriver stream_job_driver = { |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 227 | .instance_size = sizeof(StreamBlockJob), |
Fam Zheng | 79e14bf | 2013-10-08 17:29:40 +0800 | [diff] [blame] | 228 | .job_type = BLOCK_JOB_TYPE_STREAM, |
Stefan Hajnoczi | 5094a6c | 2012-01-18 14:40:45 +0000 | [diff] [blame] | 229 | .set_speed = stream_set_speed, |
John Snow | a7815a7 | 2016-11-08 01:50:36 -0500 | [diff] [blame] | 230 | .start = stream_run, |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 231 | }; |
| 232 | |
Alberto Garcia | 2323322 | 2016-07-05 17:28:59 +0300 | [diff] [blame] | 233 | void stream_start(const char *job_id, BlockDriverState *bs, |
| 234 | BlockDriverState *base, const char *backing_file_str, |
John Snow | 8254b6d | 2016-10-27 12:06:58 -0400 | [diff] [blame] | 235 | int64_t speed, BlockdevOnError on_error, Error **errp) |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 236 | { |
| 237 | StreamBlockJob *s; |
Alberto Garcia | 61b49e4 | 2016-10-28 10:08:10 +0300 | [diff] [blame] | 238 | BlockDriverState *iter; |
| 239 | int orig_bs_flags; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 240 | |
Alberto Garcia | 61b49e4 | 2016-10-28 10:08:10 +0300 | [diff] [blame] | 241 | /* Make sure that the image is opened in read-write mode */ |
| 242 | orig_bs_flags = bdrv_get_flags(bs); |
| 243 | if (!(orig_bs_flags & BDRV_O_RDWR)) { |
| 244 | if (bdrv_reopen(bs, orig_bs_flags | BDRV_O_RDWR, errp) != 0) { |
Alberto Garcia | 61b49e4 | 2016-10-28 10:08:10 +0300 | [diff] [blame] | 245 | return; |
| 246 | } |
| 247 | } |
| 248 | |
Kevin Wolf | a170a91 | 2017-02-09 13:34:18 +0100 | [diff] [blame] | 249 | /* Prevent concurrent jobs trying to modify the graph structure here, we |
| 250 | * already have our own plans. Also don't allow resize as the image size is |
| 251 | * queried only at the job start and then cached. */ |
| 252 | s = block_job_create(job_id, &stream_job_driver, bs, |
| 253 | BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | |
| 254 | BLK_PERM_GRAPH_MOD, |
| 255 | BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED | |
| 256 | BLK_PERM_WRITE, |
| 257 | speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp); |
| 258 | if (!s) { |
| 259 | goto fail; |
| 260 | } |
| 261 | |
| 262 | /* Block all intermediate nodes between bs and base, because they will |
| 263 | * disappear from the chain after this operation. The streaming job reads |
| 264 | * every block only once, assuming that it doesn't change, so block writes |
| 265 | * and resizes. */ |
Alberto Garcia | 61b49e4 | 2016-10-28 10:08:10 +0300 | [diff] [blame] | 266 | for (iter = backing_bs(bs); iter && iter != base; iter = backing_bs(iter)) { |
Kevin Wolf | 76d554e | 2017-01-17 11:56:42 +0100 | [diff] [blame] | 267 | block_job_add_bdrv(&s->common, "intermediate node", iter, 0, |
Kevin Wolf | a170a91 | 2017-02-09 13:34:18 +0100 | [diff] [blame] | 268 | BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED, |
| 269 | &error_abort); |
Alberto Garcia | 61b49e4 | 2016-10-28 10:08:10 +0300 | [diff] [blame] | 270 | } |
| 271 | |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 272 | s->base = base; |
Jeff Cody | 13d8cc5 | 2014-06-25 15:40:11 -0400 | [diff] [blame] | 273 | s->backing_file_str = g_strdup(backing_file_str); |
Alberto Garcia | 61b49e4 | 2016-10-28 10:08:10 +0300 | [diff] [blame] | 274 | s->bs_flags = orig_bs_flags; |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 275 | |
Paolo Bonzini | 1d80909 | 2012-09-28 17:22:59 +0200 | [diff] [blame] | 276 | s->on_error = on_error; |
John Snow | 5ccac6f | 2016-11-08 01:50:37 -0500 | [diff] [blame] | 277 | trace_stream_start(bs, base, s); |
| 278 | block_job_start(&s->common); |
Kevin Wolf | a170a91 | 2017-02-09 13:34:18 +0100 | [diff] [blame] | 279 | return; |
| 280 | |
| 281 | fail: |
| 282 | if (orig_bs_flags != bdrv_get_flags(bs)) { |
Alberto Garcia | 525989a | 2017-05-15 12:34:24 +0300 | [diff] [blame] | 283 | bdrv_reopen(bs, orig_bs_flags, NULL); |
Kevin Wolf | a170a91 | 2017-02-09 13:34:18 +0100 | [diff] [blame] | 284 | } |
Stefan Hajnoczi | 4f1043b | 2012-01-18 14:40:44 +0000 | [diff] [blame] | 285 | } |