/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu-common.h"
#include "block/block.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "hw/hw.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "qemu/timer.h"
#include "migration/block.h"
#include "migration/migration.h"
#include "sysemu/blockdev.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH 65536

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockDriverState *bs;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    /* Written during setup phase.  Can be read without a lock.  */
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

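/* Serialize one block onto the migration stream: sector number and
 * flags, then the owning device name, then the BLOCK_SIZE payload.
 * All-zero blocks are flagged and sent without a payload.
 */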
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(bdrv_get_device_name(blk->bmds->bs));
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)bdrv_get_device_name(blk->bmds->bs), len);

    /* If a block is zero we need to flush here, since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus if we queue zero blocks we slow down the migration.  */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

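/* Test whether the chunk containing @sector still has an AIO read
 * in flight.
 */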
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < bdrv_nb_sectors(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

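/* Set or clear the in-flight bit of every chunk covered by the
 * [sector_num, sector_num + nb_sectors) request.
 */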
static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

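/* Allocate one in-flight bit per dirty chunk, rounded up to whole bytes. */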
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = bdrv_nb_sectors(bs) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

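/* AIO completion callback: record the result, queue the block for
 * flush_blks() and clear its in-flight bits.
 */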
static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

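/* Submit an asynchronous read of the next bulk chunk of @bmds,
 * skipping unallocated sectors when migrating on top of a shared
 * base.  Returns 1 once the device has no bulk data left.
 */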
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(bdrv_get_aio_context(bs));
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
        aio_context_release(bdrv_get_aio_context(bs));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(bdrv_get_aio_context(bmds->bs));
    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector, nr_sectors);
    aio_context_release(bdrv_get_aio_context(bmds->bs));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

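/* Create a dirty bitmap for every device.  On failure, release any
 * bitmaps created so far and return a negative errno.
 */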
static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(bmds->bs, BLOCK_SIZE,
                                                      NULL, NULL);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            aio_context_acquire(bdrv_get_aio_context(bmds->bs));
            bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
            aio_context_release(bdrv_get_aio_context(bmds->bs));
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        bdrv_release_dirty_bitmap(bmds->bs, bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }
}

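/* Build a BlkMigDevState for every writable block device: allocate
 * the AIO bitmap, block concurrent operations on the device and take
 * a reference so it cannot go away during migration.
 */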
static void init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            return;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        error_setg(&bmds->blocker, "block device is in use by migration");
        bdrv_op_block_all(bs, bmds->blocker);
        bdrv_ref(bs);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

/* Called with no lock taken.  */

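/* Advance the bulk phase on the first device that still has bulk
 * data, emitting a progress marker whenever the overall percentage
 * changes.  Returns 0 once every device has completed its bulk phase.
 */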
static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

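/* Find the next dirty chunk at or after bmds->cur_dirty and send it,
 * either synchronously (is_async == 0) or through an AIO read.  Any
 * read already in flight for the chunk is drained first.  Returns 1
 * once the cursor has swept the whole device, negative on read error.
 */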
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            bdrv_drain(bmds->bs);
        } else {
            blk_mig_unlock();
        }
        if (bdrv_get_dirty(bmds->bs, bmds->dirty_bitmap, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

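/* Move completed reads from blk_list onto the wire until the rate
 * limiter kicks in.  Returns the first error recorded on a queued
 * block, if any.
 */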
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

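/* Sum of the dirty bitmap counts of all devices, in bytes. */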
static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(bdrv_get_aio_context(bmds->bs));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(bdrv_get_aio_context(bmds->bs));
    }

    return dirty << BDRV_SECTOR_BITS;
}

/* Called with iothread lock taken.  */

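/* Undo everything done at setup time: stop dirty tracking, unblock
 * device operations, drop device references and free any blocks that
 * were still queued.
 */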
static void block_migration_cleanup(void *opaque)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;
    AioContext *ctx;

    bdrv_drain_all();

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(bmds->bs, bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->bs can disappear during bdrv_unref.  */
        ctx = bdrv_get_aio_context(bmds->bs);
        aio_context_acquire(ctx);
        bdrv_unref(bmds->bs);
        aio_context_release(ctx);

        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

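/* Setup stage of the savevm handler: enumerate the devices, start
 * dirty tracking, and terminate the first section with an EOS marker.
 */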
static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    init_blk_migration(f);

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

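/* One iteration of the live phase: flush completed reads, then queue
 * further reads (bulk chunks first, dirty chunks afterwards) while
 * staying below the bandwidth limit and MAX_INFLIGHT_IO.  Returns 1
 * if data was sent, 0 if there was nothing to do, negative on error.
 */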
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that the bulk save has completed and
       all async reads have completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return 0;
}

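/* Estimate how many bytes are still to be transferred; the entire
 * amount is reported as non-postcopiable since block migration does
 * not support postcopy.
 */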
static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

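/* Incoming side: replay the stream produced by the save handlers,
 * dispatching on the per-block flags until the EOS marker is seen.
 */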
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    BlockBackend *blk;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }
            bs = blk_bs(blk);
            if (!bs) {
                fprintf(stderr, "Block device %s has no medium\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_nb_sectors(bs);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                bdrv_invalidate_cache(bs, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = bdrv_write_zeroes(bs, addr, nr_sectors,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                ret = bdrv_write(bs, addr, buf, nr_sectors);
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static void block_set_params(const MigrationParams *params, void *opaque)
{
    block_mig_state.blk_enable = params->blk;
    block_mig_state.shared_base = params->shared;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= params->shared;
}

static bool block_is_active(void *opaque)
{
    return block_mig_state.blk_enable == 1;
}

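/* Hooks wired into the generic savevm live-migration machinery. */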
static SaveVMHandlers savevm_block_handlers = {
    .set_params = block_set_params,
    .save_live_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

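/* Register the block migration handlers with the savevm machinery. */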
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}