/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include <zlib.h>
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "xbzrle.h"
#include "ram.h"
#include "migration.h"
#include "socket.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "migration/block.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
#include "savevm.h"

/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
 * worked for pages that were filled with the same char. We switched
 * it to only search for the zero value. And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock().
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}

/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_MIGRATABLE(block) \
    RAMBLOCK_FOREACH(block)                \
        if (!qemu_ram_is_migratable(block)) {} else

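/* Allocate the receivedmap bitmap, one bit per target page, for every
 * migratable RAMBlock. */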
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_MIGRATABLE(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

#define RAMBLOCK_RECV_BITMAP_ENDING (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment). So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap. This is
     * required when source and destination VMs are not using the
     * same endianness. (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = nbits / 8;

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}

/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have dirtied too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* number of iterations at the beginning of period */
    uint64_t iterations_prev;
    /* Iterations since start */
    uint64_t iterations;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                ram_addr_t offset, uint8_t *source_buf);

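/*
 * Compression worker thread: waits for a page to be posted in its
 * CompressParam, compresses it into the per-thread QEMUFile buffer and
 * then signals the migration thread through comp_done_cond.
 */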
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, &param->stream, block, offset,
                                 param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

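/* Ask every compression thread to quit and wake it up so it can exit */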
static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!comp_param[i].file) {
            break;
        }
        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

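/*
 * Allocate per-thread buffers, initialize the zlib streams and start the
 * compression worker threads.  Returns 0 on success, -1 if any allocation
 * or deflateInit() fails (everything set up so far is torn down again).
 */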
static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}

/* Multiple fd's */

#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
} __attribute__((packed)) MultiFDInit_t;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
} MultiFDSendParams;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
} MultiFDRecvParams;

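/*
 * Send the initial handshake packet (magic, version, uuid and channel id)
 * on a freshly created multifd channel; the receive side validates it in
 * multifd_recv_initial_packet() below.
 */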
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }
    return 0;
}

static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    be32_to_cpus(&msg.magic);
    be32_to_cpus(&msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d "
                   "expected a value below %d", msg.id,
                   migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}

struct {
    MultiFDSendParams *params;
    /* number of created threads */
    int count;
} *multifd_send_state;

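/*
 * Shut down all send channels: record the error (if any) in the migration
 * state, then ask every channel thread to quit and kick its semaphore.
 */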
static void multifd_send_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_save_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        g_free(p->name);
        p->name = NULL;
    }
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
    return ret;
}

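/*
 * Per-channel send thread.  At this stage it only performs the initial
 * handshake and then waits on its semaphore until it is asked to quit;
 * it does not yet transmit any pages.
 */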
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        goto out;
    }

    while (true) {
        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_wait(&p->sem);
    }

out:
    if (local_err) {
        multifd_send_terminate_threads(local_err);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    return NULL;
}

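/*
 * Completion callback for the outgoing socket channel: on success attach
 * the channel to its MultiFDSendParams and start the send thread, on
 * failure tear the multifd state down and record the error.
 */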
static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
{
    MultiFDSendParams *p = opaque;
    QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
    Error *local_err = NULL;

    if (qio_task_propagate_error(task, &local_err)) {
        if (multifd_save_cleanup(&local_err) != 0) {
            migrate_set_error(migrate_get_current(), local_err);
        }
    } else {
        p->c = QIO_CHANNEL(sioc);
        qio_channel_set_delay(p->c, false);
        p->running = true;
        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);

        atomic_inc(&multifd_send_state->count);
    }
}

int multifd_save_setup(void)
{
    int thread_count;
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    atomic_set(&multifd_send_state->count, 0);
    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        p->quit = false;
        p->id = i;
        p->name = g_strdup_printf("multifdsend_%d", i);
        socket_send_channel_create(multifd_new_send_channel_async, p);
    }
    return 0;
}

struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
} *multifd_recv_state;

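/* Receive-side counterpart of multifd_send_terminate_threads() */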
static void multifd_recv_terminate_threads(Error *err)
{
    int i;

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_load_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    multifd_recv_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        object_unref(OBJECT(p->c));
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        g_free(p->name);
        p->name = NULL;
    }
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;

    return ret;
}

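/* Per-channel receive thread: like the send side, it currently just waits
 * on its semaphore until it is asked to quit. */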
static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;

    while (true) {
        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_wait(&p->sem);
    }

    qemu_mutex_lock(&p->mutex);
    p->running = false;
    qemu_mutex_unlock(&p->mutex);

    return NULL;
}

int multifd_load_setup(void)
{
    int thread_count;
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    atomic_set(&multifd_recv_state->count, 0);
    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        p->quit = false;
        p->id = i;
        p->name = g_strdup_printf("multifdrecv_%d", i);
    }
    return 0;
}

bool multifd_recv_all_channels_created(void)
{
    int thread_count = migrate_multifd_channels();

    if (!migrate_use_multifd()) {
        return true;
    }

    return thread_count == atomic_read(&multifd_recv_state->count);
}

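/*
 * Called for each incoming multifd connection: validate the initial
 * packet, bind the channel to its slot and start the receive thread.
 * Once all expected channels have arrived, continue with the incoming
 * migration proper.
 */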
void multifd_recv_new_channel(QIOChannel *ioc)
{
    MultiFDRecvParams *p;
    Error *local_err = NULL;
    int id;

    id = multifd_recv_initial_packet(ioc, &local_err);
    if (id < 0) {
        multifd_recv_terminate_threads(local_err);
        return;
    }

    p = &multifd_recv_state->params[id];
    if (p->c != NULL) {
        error_setg(&local_err, "multifd: received id '%d' already setup",
                   id);
        multifd_recv_terminate_threads(local_err);
        return;
    }
    p->c = ioc;
    object_ref(OBJECT(ioc));

    p->running = true;
    qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                       QEMU_THREAD_JOINABLE);
    atomic_inc(&multifd_recv_state->count);
    if (multifd_recv_state->count == migrate_multifd_channels()) {
        migration_incoming_process();
    }
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}

/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_icrement = s->parameters.cpu_throttle_increment;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
    }
}

/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;

    return 1;
}

/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (!qemu_ram_is_migratable(rb)) {
        return size;
    }

    if (rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}

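/*
 * Clear the dirty bit for @page in @rb and, if it was set, account for one
 * less dirty page left to send.  Returns whether the bit was set.
 */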
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    return ret;
}

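/* Pull the dirty log for a range of @rb into the migration bitmap and
 * add the newly dirtied pages to this period's counters. */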
static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        summary |= block->page_size;
    }

    return summary;
}

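/*
 * Synchronize the dirty bitmap of every migratable block with the memory
 * core, update the transfer statistics and, at most once per second,
 * re-evaluate auto-converge throttling and the XBZRLE cache miss rate.
 */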
static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    int64_t end_time;
    uint64_t bytes_xfer_now;

    ram_counters.dirty_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    RAMBLOCK_FOREACH_MIGRATABLE(block) {
        migration_bitmap_sync_range(rs, block, 0, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        /* calculate period counters */
        ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
            / (end_time - rs->time_last_bitmap_sync);
        bytes_xfer_now = ram_counters.transferred;

        /* During block migration the auto-converge logic incorrectly detects
         * that ram migration makes no progress. Avoid this by disabling the
         * throttling logic during the bulk phase of block migration. */
        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens twice, start or increase
               throttling */

            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                    trace_migration_throttle();
                    rs->dirty_rate_high_cnt = 0;
                    mig_throttle_guest_down();
            }
        }

        if (migrate_use_xbzrle()) {
            if (rs->iterations_prev != rs->iterations) {
                xbzrle_counters.cache_miss_rate =
                   (double)(xbzrle_counters.cache_miss -
                            rs->xbzrle_cache_miss_prev) /
                   (rs->iterations - rs->iterations_prev);
            }
            rs->iterations_prev = rs->iterations;
            rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
        }

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = bytes_xfer_now;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
    }
}

/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    uint8_t *p = block->host + offset;
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        ram_counters.duplicate++;
        ram_counters.transferred +=
            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(rs->f, 0);
        ram_counters.transferred += 1;
        pages = 1;
    }

    return pages;
}

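/* When release-ram is enabled during postcopy, discard the pages we have
 * just sent so the source frees that memory early. */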
static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
}

/*
 * @pages: the number of pages written by the control path,
 *        < 0 - error
 *        > 0 - number of pages written
 *
 * Return true if the page has been saved, otherwise false is returned.
 */
static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                              int *pages)
{
    uint64_t bytes_xmit = 0;
    int ret;

    *pages = -1;
    ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
                                &bytes_xmit);
    if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
        return false;
    }

    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        *pages = 1;
    }

    if (ret == RAM_SAVE_CONTROL_DELAYED) {
        return true;
    }

    if (bytes_xmit > 0) {
        ram_counters.normal++;
    } else if (bytes_xmit == 0) {
        ram_counters.duplicate++;
    }

    return true;
}

/*
 * directly send the page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @buf: the page to be sent
 * @async: send the page asynchronously
 */
static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                            uint8_t *buf, bool async)
{
    ram_counters.transferred += save_page_header(rs, rs->f, block,
                                                 offset | RAM_SAVE_FLAG_PAGE);
    if (async) {
        qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
                              migrate_release_ram() &
                              migration_in_postcopy());
    } else {
        qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
    }
    ram_counters.transferred += TARGET_PAGE_SIZE;
    ram_counters.normal++;
    return 1;
}

Juan Quintela56e93d22015-05-07 19:33:31 +02001317/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001318 * ram_save_page: send the given page to the stream
Juan Quintela56e93d22015-05-07 19:33:31 +02001319 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001320 * Returns the number of pages written.
Dr. David Alan Gilbert3fd3c4b2015-12-10 16:31:46 +00001321 * < 0 - error
1322 * >=0 - Number of pages written - this might legally be 0
1323 * if xbzrle noticed the page was the same.
Juan Quintela56e93d22015-05-07 19:33:31 +02001324 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001325 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02001326 * @pss: data about the page we want to send
 1327 * (the block and the offset of the page inside it)
1328 * @last_stage: if we are at the completion stage
Juan Quintela56e93d22015-05-07 19:33:31 +02001329 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01001330static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
Juan Quintela56e93d22015-05-07 19:33:31 +02001331{
1332 int pages = -1;
Juan Quintela56e93d22015-05-07 19:33:31 +02001333 uint8_t *p;
Juan Quintela56e93d22015-05-07 19:33:31 +02001334 bool send_async = true;
zhanghailianga08f6892016-01-15 11:37:44 +08001335 RAMBlock *block = pss->block;
Juan Quintelaa935e302017-03-21 15:36:51 +01001336 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
Xiao Guangrong059ff0f2018-03-30 15:51:23 +08001337 ram_addr_t current_addr = block->offset + offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02001338
Dr. David Alan Gilbert2f68e392015-08-13 11:51:30 +01001339 p = block->host + offset;
Dr. David Alan Gilbert1db9d8e2017-04-26 19:37:21 +01001340 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
Juan Quintela56e93d22015-05-07 19:33:31 +02001341
Juan Quintela56e93d22015-05-07 19:33:31 +02001342 XBZRLE_cache_lock();
Xiao Guangrongd7400a32018-03-30 15:51:26 +08001343 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
1344 migrate_use_xbzrle()) {
Xiao Guangrong059ff0f2018-03-30 15:51:23 +08001345 pages = save_xbzrle_page(rs, &p, current_addr, block,
1346 offset, last_stage);
1347 if (!last_stage) {
1348 /* Can't send this cached data async, since the cache page
1349 * might get updated before it gets to the wire
Juan Quintela56e93d22015-05-07 19:33:31 +02001350 */
Xiao Guangrong059ff0f2018-03-30 15:51:23 +08001351 send_async = false;
Juan Quintela56e93d22015-05-07 19:33:31 +02001352 }
1353 }
1354
1355 /* XBZRLE overflow or normal page */
1356 if (pages == -1) {
Xiao Guangrong65dacaa2018-03-30 15:51:27 +08001357 pages = save_normal_page(rs, block, offset, p, send_async);
Juan Quintela56e93d22015-05-07 19:33:31 +02001358 }
1359
1360 XBZRLE_cache_unlock();
1361
1362 return pages;
1363}
1364
Xiao Guangrongdcaf4462018-03-30 15:51:20 +08001365static int do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08001366 ram_addr_t offset, uint8_t *source_buf)
Juan Quintela56e93d22015-05-07 19:33:31 +02001367{
Juan Quintela53518d92017-05-04 11:46:24 +02001368 RAMState *rs = ram_state;
Juan Quintela56e93d22015-05-07 19:33:31 +02001369 int bytes_sent, blen;
Liang Lia7a9a882016-05-05 15:32:57 +08001370 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
Juan Quintela56e93d22015-05-07 19:33:31 +02001371
Juan Quintela2bf3aa82017-05-10 13:28:13 +02001372 bytes_sent = save_page_header(rs, f, block, offset |
Juan Quintela56e93d22015-05-07 19:33:31 +02001373 RAM_SAVE_FLAG_COMPRESS_PAGE);
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08001374
1375 /*
 1376 * copy it to an internal buffer to avoid it being modified by the VM,
 1377 * so that we can catch any error during compression and
 1378 * decompression
1379 */
1380 memcpy(source_buf, p, TARGET_PAGE_SIZE);
1381 blen = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
Liang Lib3be2892016-05-05 15:32:54 +08001382 if (blen < 0) {
1383 bytes_sent = 0;
1384 qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
1385 error_report("compressed data failed!");
1386 } else {
1387 bytes_sent += blen;
Juan Quintela57273092017-03-20 22:25:28 +01001388 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
Liang Lib3be2892016-05-05 15:32:54 +08001389 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001390
1391 return bytes_sent;
1392}
1393
Juan Quintelace25d332017-03-15 11:00:51 +01001394static void flush_compressed_data(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02001395{
1396 int idx, len, thread_count;
1397
1398 if (!migrate_use_compression()) {
1399 return;
1400 }
1401 thread_count = migrate_compress_threads();
Liang Lia7a9a882016-05-05 15:32:57 +08001402
Liang Li0d9f9a52016-05-05 15:32:59 +08001403 qemu_mutex_lock(&comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02001404 for (idx = 0; idx < thread_count; idx++) {
Liang Lia7a9a882016-05-05 15:32:57 +08001405 while (!comp_param[idx].done) {
Liang Li0d9f9a52016-05-05 15:32:59 +08001406 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02001407 }
Liang Lia7a9a882016-05-05 15:32:57 +08001408 }
Liang Li0d9f9a52016-05-05 15:32:59 +08001409 qemu_mutex_unlock(&comp_done_lock);
Liang Lia7a9a882016-05-05 15:32:57 +08001410
1411 for (idx = 0; idx < thread_count; idx++) {
1412 qemu_mutex_lock(&comp_param[idx].mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08001413 if (!comp_param[idx].quit) {
Juan Quintelace25d332017-03-15 11:00:51 +01001414 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
Juan Quintela93604472017-06-06 19:49:03 +02001415 ram_counters.transferred += len;
Juan Quintela56e93d22015-05-07 19:33:31 +02001416 }
Liang Lia7a9a882016-05-05 15:32:57 +08001417 qemu_mutex_unlock(&comp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02001418 }
1419}
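/*
 * Locking sketch of the drain above: comp_done_lock/comp_done_cond are
 * only used to wait until every worker has marked itself 'done'; each
 * per-thread mutex is then taken so that the worker's private QEMUFile
 * buffer can be flushed into rs->f without racing a new job submission.
 */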
1420
1421static inline void set_compress_params(CompressParam *param, RAMBlock *block,
1422 ram_addr_t offset)
1423{
1424 param->block = block;
1425 param->offset = offset;
1426}
1427
Juan Quintelace25d332017-03-15 11:00:51 +01001428static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
1429 ram_addr_t offset)
Juan Quintela56e93d22015-05-07 19:33:31 +02001430{
1431 int idx, thread_count, bytes_xmit = -1, pages = -1;
1432
1433 thread_count = migrate_compress_threads();
Liang Li0d9f9a52016-05-05 15:32:59 +08001434 qemu_mutex_lock(&comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02001435 while (true) {
1436 for (idx = 0; idx < thread_count; idx++) {
1437 if (comp_param[idx].done) {
Liang Lia7a9a882016-05-05 15:32:57 +08001438 comp_param[idx].done = false;
Juan Quintelace25d332017-03-15 11:00:51 +01001439 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
Liang Lia7a9a882016-05-05 15:32:57 +08001440 qemu_mutex_lock(&comp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02001441 set_compress_params(&comp_param[idx], block, offset);
Liang Lia7a9a882016-05-05 15:32:57 +08001442 qemu_cond_signal(&comp_param[idx].cond);
1443 qemu_mutex_unlock(&comp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02001444 pages = 1;
Juan Quintela93604472017-06-06 19:49:03 +02001445 ram_counters.normal++;
1446 ram_counters.transferred += bytes_xmit;
Juan Quintela56e93d22015-05-07 19:33:31 +02001447 break;
1448 }
1449 }
1450 if (pages > 0) {
1451 break;
1452 } else {
Liang Li0d9f9a52016-05-05 15:32:59 +08001453 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02001454 }
1455 }
Liang Li0d9f9a52016-05-05 15:32:59 +08001456 qemu_mutex_unlock(&comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02001457
1458 return pages;
1459}
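/*
 * Scheduling sketch: the loop above scans the workers for one with
 * 'done' set; that worker's previously buffered output is flushed
 * first, then (block, offset) is handed over under its mutex and its
 * condition variable is signalled.  If no worker is idle we sleep on
 * comp_done_cond until one finishes.
 */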
1460
1461/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001462 * find_dirty_block: find the next dirty page and update any state
1463 * associated with the search process.
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001464 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001465 * Returns true if a page is found
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001466 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001467 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001468 * @pss: data about the state of the current dirty page scan
1469 * @again: set to false if the search has scanned the whole of RAM
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001470 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001471static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001472{
Juan Quintelaf20e2862017-03-21 16:19:05 +01001473 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
Juan Quintela6f37bb82017-03-13 19:26:29 +01001474 if (pss->complete_round && pss->block == rs->last_seen_block &&
Juan Quintelaa935e302017-03-21 15:36:51 +01001475 pss->page >= rs->last_page) {
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001476 /*
1477 * We've been once around the RAM and haven't found anything.
1478 * Give up.
1479 */
1480 *again = false;
1481 return false;
1482 }
Juan Quintelaa935e302017-03-21 15:36:51 +01001483 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001484 /* Didn't find anything in this RAM Block */
Juan Quintelaa935e302017-03-21 15:36:51 +01001485 pss->page = 0;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001486 pss->block = QLIST_NEXT_RCU(pss->block, next);
1487 if (!pss->block) {
1488 /* Hit the end of the list */
1489 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
1490 /* Flag that we've looped */
1491 pss->complete_round = true;
Juan Quintela6f37bb82017-03-13 19:26:29 +01001492 rs->ram_bulk_stage = false;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001493 if (migrate_use_xbzrle()) {
1494 /* If xbzrle is on, stop using the data compression at this
1495 * point. In theory, xbzrle can do better than compression.
1496 */
Juan Quintelace25d332017-03-15 11:00:51 +01001497 flush_compressed_data(rs);
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001498 }
1499 }
1500 /* Didn't find anything this time, but try again on the new block */
1501 *again = true;
1502 return false;
1503 } else {
1504 /* Can go around again, but... */
1505 *again = true;
 1506 /* We've found something, so we probably don't need to go around again */
1507 return true;
1508 }
1509}
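/*
 * Walk-order sketch (hypothetical block names): the scan advances
 * pss->page inside pss->block, hops to the next RAMBlock once the
 * block's used_length is exhausted, and wraps to the list head once,
 *
 *     pc.ram: 0..N-1  ->  vga.vram: 0..M-1  ->  wrap  ->  pc.ram: 0..
 *
 * *again stays true until a complete round finds nothing dirty.
 */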
1510
Juan Quintela3d0684b2017-03-23 15:06:39 +01001511/**
 1512 * unqueue_page: gets a page off the queue
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001513 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001514 * Helper for 'get_queued_page' - gets a page off the queue
1515 *
1516 * Returns the block of the page (or NULL if none available)
1517 *
Juan Quintelaec481c62017-03-20 22:12:40 +01001518 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001519 * @offset: used to return the offset within the RAMBlock
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001520 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001521static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001522{
1523 RAMBlock *block = NULL;
1524
Juan Quintelaec481c62017-03-20 22:12:40 +01001525 qemu_mutex_lock(&rs->src_page_req_mutex);
1526 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
1527 struct RAMSrcPageRequest *entry =
1528 QSIMPLEQ_FIRST(&rs->src_page_requests);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001529 block = entry->rb;
1530 *offset = entry->offset;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001531
1532 if (entry->len > TARGET_PAGE_SIZE) {
1533 entry->len -= TARGET_PAGE_SIZE;
1534 entry->offset += TARGET_PAGE_SIZE;
1535 } else {
1536 memory_region_unref(block->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01001537 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001538 g_free(entry);
1539 }
1540 }
Juan Quintelaec481c62017-03-20 22:12:40 +01001541 qemu_mutex_unlock(&rs->src_page_req_mutex);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001542
1543 return block;
1544}
1545
Juan Quintela3d0684b2017-03-23 15:06:39 +01001546/**
 1547 * get_queued_page: unqueue a page from the postcopy requests
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001548 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001549 * Skips pages that are already sent (!dirty)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001550 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001551 * Returns true if a queued page is found
1552 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001553 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001554 * @pss: data about the state of the current dirty page scan
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001555 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001556static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001557{
1558 RAMBlock *block;
1559 ram_addr_t offset;
1560 bool dirty;
1561
1562 do {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001563 block = unqueue_page(rs, &offset);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001564 /*
1565 * We're sending this page, and since it's postcopy nothing else
1566 * will dirty it, and we must make sure it doesn't get sent again
1567 * even if this queue request was received after the background
1568 * search already sent it.
1569 */
1570 if (block) {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001571 unsigned long page;
1572
Juan Quintela6b6712e2017-03-22 15:18:04 +01001573 page = offset >> TARGET_PAGE_BITS;
1574 dirty = test_bit(page, block->bmap);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001575 if (!dirty) {
Juan Quintela06b10682017-03-21 15:18:05 +01001576 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
Juan Quintela6b6712e2017-03-22 15:18:04 +01001577 page, test_bit(page, block->unsentmap));
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001578 } else {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001579 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001580 }
1581 }
1582
1583 } while (block && !dirty);
1584
1585 if (block) {
1586 /*
1587 * As soon as we start servicing pages out of order, then we have
1588 * to kill the bulk stage, since the bulk stage assumes
1589 * in (migration_bitmap_find_and_reset_dirty) that every page is
1590 * dirty, that's no longer true.
1591 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001592 rs->ram_bulk_stage = false;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001593
1594 /*
1595 * We want the background search to continue from the queued page
1596 * since the guest is likely to want other pages near to the page
1597 * it just requested.
1598 */
1599 pss->block = block;
Juan Quintelaa935e302017-03-21 15:36:51 +01001600 pss->page = offset >> TARGET_PAGE_BITS;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001601 }
1602
1603 return !!block;
1604}
1605
Juan Quintela56e93d22015-05-07 19:33:31 +02001606/**
Juan Quintela5e58f962017-04-03 22:06:54 +02001607 * migration_page_queue_free: drop any remaining pages in the ram
1608 * request queue
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001609 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001610 * It should be empty at the end anyway, but in error cases there may
 1611 * be some left. In case any page is left, we drop it.
1612 *
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001613 */
Juan Quintela83c13382017-05-04 11:45:01 +02001614static void migration_page_queue_free(RAMState *rs)
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001615{
Juan Quintelaec481c62017-03-20 22:12:40 +01001616 struct RAMSrcPageRequest *mspr, *next_mspr;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001617 /* This queue generally should be empty - but in the case of a failed
 1618 * migration it might have some droppings in.
1619 */
1620 rcu_read_lock();
Juan Quintelaec481c62017-03-20 22:12:40 +01001621 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001622 memory_region_unref(mspr->rb->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01001623 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001624 g_free(mspr);
1625 }
1626 rcu_read_unlock();
1627}
1628
1629/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001630 * ram_save_queue_pages: queue the page for transmission
1631 *
1632 * A request from postcopy destination for example.
1633 *
1634 * Returns zero on success or negative on error
1635 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001636 * @rbname: Name of the RAMBlock of the request. NULL means the
 1637 * same as the last one.
1638 * @start: starting address from the start of the RAMBlock
1639 * @len: length (in bytes) to send
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001640 */
Juan Quintela96506892017-03-14 18:41:03 +01001641int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001642{
1643 RAMBlock *ramblock;
Juan Quintela53518d92017-05-04 11:46:24 +02001644 RAMState *rs = ram_state;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001645
Juan Quintela93604472017-06-06 19:49:03 +02001646 ram_counters.postcopy_requests++;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001647 rcu_read_lock();
1648 if (!rbname) {
1649 /* Reuse last RAMBlock */
Juan Quintela68a098f2017-03-14 13:48:42 +01001650 ramblock = rs->last_req_rb;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001651
1652 if (!ramblock) {
1653 /*
1654 * Shouldn't happen, we can't reuse the last RAMBlock if
1655 * it's the 1st request.
1656 */
1657 error_report("ram_save_queue_pages no previous block");
1658 goto err;
1659 }
1660 } else {
1661 ramblock = qemu_ram_block_by_name(rbname);
1662
1663 if (!ramblock) {
1664 /* We shouldn't be asked for a non-existent RAMBlock */
1665 error_report("ram_save_queue_pages no block '%s'", rbname);
1666 goto err;
1667 }
Juan Quintela68a098f2017-03-14 13:48:42 +01001668 rs->last_req_rb = ramblock;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001669 }
1670 trace_ram_save_queue_pages(ramblock->idstr, start, len);
1671 if (start+len > ramblock->used_length) {
Juan Quintela9458ad62015-11-10 17:42:05 +01001672 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1673 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001674 __func__, start, len, ramblock->used_length);
1675 goto err;
1676 }
1677
Juan Quintelaec481c62017-03-20 22:12:40 +01001678 struct RAMSrcPageRequest *new_entry =
1679 g_malloc0(sizeof(struct RAMSrcPageRequest));
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001680 new_entry->rb = ramblock;
1681 new_entry->offset = start;
1682 new_entry->len = len;
1683
1684 memory_region_ref(ramblock->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01001685 qemu_mutex_lock(&rs->src_page_req_mutex);
1686 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
1687 qemu_mutex_unlock(&rs->src_page_req_mutex);
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001688 rcu_read_unlock();
1689
1690 return 0;
1691
1692err:
1693 rcu_read_unlock();
1694 return -1;
1695}
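/*
 * Usage sketch (hypothetical values): a postcopy fault on the
 * destination for one target page at offset 0x2000 of the "pc.ram"
 * block ends up queued on the source roughly as
 *
 *     ram_save_queue_pages("pc.ram", 0x2000, TARGET_PAGE_SIZE);
 *
 * and get_queued_page() later pops it to redirect the dirty-page scan.
 */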
1696
Xiao Guangrongd7400a32018-03-30 15:51:26 +08001697static bool save_page_use_compression(RAMState *rs)
1698{
1699 if (!migrate_use_compression()) {
1700 return false;
1701 }
1702
1703 /*
 1704 * If xbzrle is on, stop using the data compression after the first
 1705 * round of migration even if compression is enabled. In theory,
1706 * xbzrle can do better than compression.
1707 */
1708 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
1709 return true;
1710 }
1711
1712 return false;
1713}
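/*
 * Decision summary (restating the checks above):
 *
 *     compression off                          -> false
 *     compression on, xbzrle off               -> true  (every round)
 *     compression on, xbzrle on, bulk stage    -> true  (first round only)
 *     compression on, xbzrle on, later rounds  -> false (xbzrle takes over)
 */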
1714
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001715/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001716 * ram_save_target_page: save one target page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001717 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001718 * Returns the number of pages written
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001719 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001720 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001721 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001722 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001723 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01001724static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
Juan Quintelaf20e2862017-03-21 16:19:05 +01001725 bool last_stage)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001726{
Xiao Guangronga8ec91f2018-03-30 15:51:25 +08001727 RAMBlock *block = pss->block;
1728 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
1729 int res;
1730
1731 if (control_save_page(rs, block, offset, &res)) {
1732 return res;
1733 }
1734
Xiao Guangrong1faa5662018-03-30 15:51:24 +08001735 /*
Xiao Guangrongd7400a32018-03-30 15:51:26 +08001736 * When starting the process of a new block, the first page of
1737 * the block should be sent out before other pages in the same
 1738 * block, and all the pages in the last block should have been sent
 1739 * out. Keeping this order is important, because the 'cont' flag
1740 * is used to avoid resending the block name.
Xiao Guangrong1faa5662018-03-30 15:51:24 +08001741 */
Xiao Guangrongd7400a32018-03-30 15:51:26 +08001742 if (block != rs->last_sent_block && save_page_use_compression(rs)) {
1743 flush_compressed_data(rs);
1744 }
1745
1746 res = save_zero_page(rs, block, offset);
1747 if (res > 0) {
1748 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
1749 * page would be stale
1750 */
1751 if (!save_page_use_compression(rs)) {
1752 XBZRLE_cache_lock();
1753 xbzrle_cache_zero_page(rs, block->offset + offset);
1754 XBZRLE_cache_unlock();
1755 }
1756 ram_release_pages(block->idstr, offset, res);
1757 return res;
1758 }
1759
Xiao Guangrongda3f56c2018-03-30 15:51:28 +08001760 /*
1761 * Make sure the first page is sent out before other pages.
1762 *
 1763 * We post it as a normal page, as compression would take a lot of
 1764 * CPU resource.
1765 */
1766 if (block == rs->last_sent_block && save_page_use_compression(rs)) {
Xiao Guangrong701b1872018-04-28 16:10:45 +08001767 return compress_page_with_multi_thread(rs, block, offset);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001768 }
1769
Xiao Guangrong1faa5662018-03-30 15:51:24 +08001770 return ram_save_page(rs, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001771}
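/*
 * Order of attempts above: control path (e.g. RDMA hook) -> zero page
 * -> multi-threaded compression (only for pages after the first one of
 * a newly started block, so the in-order 'cont' optimisation still
 * holds) -> ram_save_page() for the plain/xbzrle case.
 */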
1772
1773/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001774 * ram_save_host_page: save a whole host page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001775 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001776 * Starting at *offset send pages up to the end of the current host
1777 * page. It's valid for the initial offset to point into the middle of
1778 * a host page in which case the remainder of the hostpage is sent.
1779 * Only dirty target pages are sent. Note that the host page size may
1780 * be a huge page for this block.
Dr. David Alan Gilbert1eb3fc02017-05-17 17:58:09 +01001781 * The saving stops at the boundary of the used_length of the block
1782 * if the RAMBlock isn't a multiple of the host page size.
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001783 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001784 * Returns the number of pages written or negative on error
1785 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001786 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001787 * @ms: current migration state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001788 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001789 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001790 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01001791static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
Juan Quintelaf20e2862017-03-21 16:19:05 +01001792 bool last_stage)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001793{
1794 int tmppages, pages = 0;
Juan Quintelaa935e302017-03-21 15:36:51 +01001795 size_t pagesize_bits =
1796 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
Dr. David Alan Gilbert4c011c32017-02-24 18:28:39 +00001797
Cédric Le Goaterb895de52018-05-14 08:57:00 +02001798 if (!qemu_ram_is_migratable(pss->block)) {
1799 error_report("block %s should not be migrated !", pss->block->idstr);
1800 return 0;
1801 }
1802
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001803 do {
Xiao Guangrong1faa5662018-03-30 15:51:24 +08001804 /* Check whether the page is dirty and, if it is, send it */
1805 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
1806 pss->page++;
1807 continue;
1808 }
1809
Juan Quintelaf20e2862017-03-21 16:19:05 +01001810 tmppages = ram_save_target_page(rs, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001811 if (tmppages < 0) {
1812 return tmppages;
1813 }
1814
1815 pages += tmppages;
Xiao Guangrong1faa5662018-03-30 15:51:24 +08001816 if (pss->block->unsentmap) {
1817 clear_bit(pss->page, pss->block->unsentmap);
1818 }
1819
Juan Quintelaa935e302017-03-21 15:36:51 +01001820 pss->page++;
Dr. David Alan Gilbert1eb3fc02017-05-17 17:58:09 +01001821 } while ((pss->page & (pagesize_bits - 1)) &&
1822 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001823
1824 /* The offset we leave with is the last one we looked at */
Juan Quintelaa935e302017-03-21 15:36:51 +01001825 pss->page--;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001826 return pages;
1827}
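/*
 * Worked example (assuming 2MiB hugepage-backed RAM and 4KiB target
 * pages): pagesize_bits is 512, so one call walks up to 512 consecutive
 * target pages of the same host page, sending only the dirty ones, and
 * returns with pss->page left on the last page it looked at.
 */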
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001828
1829/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001830 * ram_find_and_save_block: finds a dirty page and sends it to f
Juan Quintela56e93d22015-05-07 19:33:31 +02001831 *
1832 * Called within an RCU critical section.
1833 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001834 * Returns the number of pages written where zero means no dirty pages
Juan Quintela56e93d22015-05-07 19:33:31 +02001835 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001836 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02001837 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001838 *
1839 * On systems where host-page-size > target-page-size it will send all the
1840 * pages in a host page that are dirty.
Juan Quintela56e93d22015-05-07 19:33:31 +02001841 */
1842
Juan Quintelace25d332017-03-15 11:00:51 +01001843static int ram_find_and_save_block(RAMState *rs, bool last_stage)
Juan Quintela56e93d22015-05-07 19:33:31 +02001844{
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001845 PageSearchStatus pss;
Juan Quintela56e93d22015-05-07 19:33:31 +02001846 int pages = 0;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001847 bool again, found;
Juan Quintela56e93d22015-05-07 19:33:31 +02001848
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05301849 /* No dirty page as there is zero RAM */
1850 if (!ram_bytes_total()) {
1851 return pages;
1852 }
1853
Juan Quintela6f37bb82017-03-13 19:26:29 +01001854 pss.block = rs->last_seen_block;
Juan Quintelaa935e302017-03-21 15:36:51 +01001855 pss.page = rs->last_page;
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001856 pss.complete_round = false;
1857
1858 if (!pss.block) {
1859 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1860 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001861
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001862 do {
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001863 again = true;
Juan Quintelaf20e2862017-03-21 16:19:05 +01001864 found = get_queued_page(rs, &pss);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001865
1866 if (!found) {
1867 /* priority queue empty, so just search for something dirty */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001868 found = find_dirty_block(rs, &pss, &again);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001869 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001870
1871 if (found) {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001872 pages = ram_save_host_page(rs, &pss, last_stage);
Juan Quintela56e93d22015-05-07 19:33:31 +02001873 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001874 } while (!pages && again);
Juan Quintela56e93d22015-05-07 19:33:31 +02001875
Juan Quintela6f37bb82017-03-13 19:26:29 +01001876 rs->last_seen_block = pss.block;
Juan Quintelaa935e302017-03-21 15:36:51 +01001877 rs->last_page = pss.page;
Juan Quintela56e93d22015-05-07 19:33:31 +02001878
1879 return pages;
1880}
1881
1882void acct_update_position(QEMUFile *f, size_t size, bool zero)
1883{
1884 uint64_t pages = size / TARGET_PAGE_SIZE;
Juan Quintelaf7ccd612017-03-13 20:30:21 +01001885
Juan Quintela56e93d22015-05-07 19:33:31 +02001886 if (zero) {
Juan Quintela93604472017-06-06 19:49:03 +02001887 ram_counters.duplicate += pages;
Juan Quintela56e93d22015-05-07 19:33:31 +02001888 } else {
Juan Quintela93604472017-06-06 19:49:03 +02001889 ram_counters.normal += pages;
1890 ram_counters.transferred += size;
Juan Quintela56e93d22015-05-07 19:33:31 +02001891 qemu_update_position(f, size);
1892 }
1893}
1894
Juan Quintela56e93d22015-05-07 19:33:31 +02001895uint64_t ram_bytes_total(void)
1896{
1897 RAMBlock *block;
1898 uint64_t total = 0;
1899
1900 rcu_read_lock();
Cédric Le Goaterb895de52018-05-14 08:57:00 +02001901 RAMBLOCK_FOREACH_MIGRATABLE(block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001902 total += block->used_length;
Peter Xu99e15582017-05-12 12:17:39 +08001903 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001904 rcu_read_unlock();
1905 return total;
1906}
1907
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001908static void xbzrle_load_setup(void)
Juan Quintela56e93d22015-05-07 19:33:31 +02001909{
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001910 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
Juan Quintela56e93d22015-05-07 19:33:31 +02001911}
1912
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001913static void xbzrle_load_cleanup(void)
1914{
1915 g_free(XBZRLE.decoded_buf);
1916 XBZRLE.decoded_buf = NULL;
1917}
1918
Peter Xu7d7c96b2017-10-19 14:31:58 +08001919static void ram_state_cleanup(RAMState **rsp)
1920{
Dr. David Alan Gilbertb9ccaf62018-02-12 16:03:39 +00001921 if (*rsp) {
1922 migration_page_queue_free(*rsp);
1923 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
1924 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
1925 g_free(*rsp);
1926 *rsp = NULL;
1927 }
Peter Xu7d7c96b2017-10-19 14:31:58 +08001928}
1929
Peter Xu84593a02017-10-19 14:31:59 +08001930static void xbzrle_cleanup(void)
1931{
1932 XBZRLE_cache_lock();
1933 if (XBZRLE.cache) {
1934 cache_fini(XBZRLE.cache);
1935 g_free(XBZRLE.encoded_buf);
1936 g_free(XBZRLE.current_buf);
1937 g_free(XBZRLE.zero_target_page);
1938 XBZRLE.cache = NULL;
1939 XBZRLE.encoded_buf = NULL;
1940 XBZRLE.current_buf = NULL;
1941 XBZRLE.zero_target_page = NULL;
1942 }
1943 XBZRLE_cache_unlock();
1944}
1945
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001946static void ram_save_cleanup(void *opaque)
Juan Quintela56e93d22015-05-07 19:33:31 +02001947{
Juan Quintela53518d92017-05-04 11:46:24 +02001948 RAMState **rsp = opaque;
Juan Quintela6b6712e2017-03-22 15:18:04 +01001949 RAMBlock *block;
Juan Quintelaeb859c52017-03-13 21:51:55 +01001950
Li Zhijian2ff64032015-07-02 20:18:05 +08001951 /* The caller must hold the iothread lock or be in a bottom half, so
 1952 * there is no write race against this migration bitmap
1953 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001954 memory_global_dirty_log_stop();
1955
Cédric Le Goaterb895de52018-05-14 08:57:00 +02001956 RAMBLOCK_FOREACH_MIGRATABLE(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01001957 g_free(block->bmap);
1958 block->bmap = NULL;
1959 g_free(block->unsentmap);
1960 block->unsentmap = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02001961 }
1962
Peter Xu84593a02017-10-19 14:31:59 +08001963 xbzrle_cleanup();
Juan Quintelaf0afa332017-06-28 11:52:28 +02001964 compress_threads_save_cleanup();
Peter Xu7d7c96b2017-10-19 14:31:58 +08001965 ram_state_cleanup(rsp);
Juan Quintela56e93d22015-05-07 19:33:31 +02001966}
1967
Juan Quintela6f37bb82017-03-13 19:26:29 +01001968static void ram_state_reset(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02001969{
Juan Quintela6f37bb82017-03-13 19:26:29 +01001970 rs->last_seen_block = NULL;
1971 rs->last_sent_block = NULL;
Juan Quintela269ace22017-03-21 15:23:31 +01001972 rs->last_page = 0;
Juan Quintela6f37bb82017-03-13 19:26:29 +01001973 rs->last_version = ram_list.version;
1974 rs->ram_bulk_stage = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02001975}
1976
1977#define MAX_WAIT 50 /* ms, half buffered_file limit */
1978
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001979/*
1980 * 'expected' is the value you expect the bitmap mostly to be full
1981 * of; it won't bother printing lines that are all this value.
1982 * If 'todump' is null the migration bitmap is dumped.
1983 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001984void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
1985 unsigned long pages)
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001986{
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001987 int64_t cur;
1988 int64_t linelen = 128;
1989 char linebuf[129];
1990
Juan Quintela6b6712e2017-03-22 15:18:04 +01001991 for (cur = 0; cur < pages; cur += linelen) {
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001992 int64_t curb;
1993 bool found = false;
1994 /*
1995 * Last line; catch the case where the line length
1996 * is longer than remaining ram
1997 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001998 if (cur + linelen > pages) {
1999 linelen = pages - cur;
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00002000 }
2001 for (curb = 0; curb < linelen; curb++) {
2002 bool thisbit = test_bit(cur + curb, todump);
2003 linebuf[curb] = thisbit ? '1' : '.';
2004 found = found || (thisbit != expected);
2005 }
2006 if (found) {
2007 linebuf[curb] = '\0';
2008 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2009 }
2010 }
2011}
2012
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002013/* **** functions for postcopy ***** */
2014
Pavel Butsykinced1c612017-02-03 18:23:21 +03002015void ram_postcopy_migrated_memory_release(MigrationState *ms)
2016{
2017 struct RAMBlock *block;
Pavel Butsykinced1c612017-02-03 18:23:21 +03002018
Cédric Le Goaterb895de52018-05-14 08:57:00 +02002019 RAMBLOCK_FOREACH_MIGRATABLE(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01002020 unsigned long *bitmap = block->bmap;
2021 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2022 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
Pavel Butsykinced1c612017-02-03 18:23:21 +03002023
2024 while (run_start < range) {
2025 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
Juan Quintelaaaa20642017-03-21 11:35:24 +01002026 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
Pavel Butsykinced1c612017-02-03 18:23:21 +03002027 (run_end - run_start) << TARGET_PAGE_BITS);
2028 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2029 }
2030 }
2031}
2032
Juan Quintela3d0684b2017-03-23 15:06:39 +01002033/**
2034 * postcopy_send_discard_bm_ram: discard a RAMBlock
2035 *
2036 * Returns zero on success
2037 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002038 * Callback from postcopy_each_ram_send_discard for each RAMBlock
2039 * Note: At this point the 'unsentmap' is the processed bitmap combined
2040 * with the dirtymap; so a '1' means it's either dirty or unsent.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002041 *
2042 * @ms: current migration state
2043 * @pds: state for postcopy
 2044 * @block: the RAMBlock whose pages we are processing
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002046 */
2047static int postcopy_send_discard_bm_ram(MigrationState *ms,
2048 PostcopyDiscardState *pds,
Juan Quintela6b6712e2017-03-22 15:18:04 +01002049 RAMBlock *block)
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002050{
Juan Quintela6b6712e2017-03-22 15:18:04 +01002051 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002052 unsigned long current;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002053 unsigned long *unsentmap = block->unsentmap;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002054
Juan Quintela6b6712e2017-03-22 15:18:04 +01002055 for (current = 0; current < end; ) {
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002056 unsigned long one = find_next_bit(unsentmap, end, current);
2057
2058 if (one <= end) {
2059 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
2060 unsigned long discard_length;
2061
2062 if (zero >= end) {
2063 discard_length = end - one;
2064 } else {
2065 discard_length = zero - one;
2066 }
Dr. David Alan Gilbertd688c622016-06-13 12:16:40 +01002067 if (discard_length) {
2068 postcopy_discard_send_range(ms, pds, one, discard_length);
2069 }
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002070 current = one + discard_length;
2071 } else {
2072 current = one;
2073 }
2074 }
2075
2076 return 0;
2077}
2078
Juan Quintela3d0684b2017-03-23 15:06:39 +01002079/**
2080 * postcopy_each_ram_send_discard: discard all RAMBlocks
2081 *
2082 * Returns 0 for success or negative for error
2083 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002084 * Utility for the outgoing postcopy code.
2085 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2086 * passing it bitmap indexes and name.
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002087 * (qemu_ram_foreach_block ends up passing unscaled lengths
2088 * which would mean postcopy code would have to deal with target page)
Juan Quintela3d0684b2017-03-23 15:06:39 +01002089 *
2090 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002091 */
2092static int postcopy_each_ram_send_discard(MigrationState *ms)
2093{
2094 struct RAMBlock *block;
2095 int ret;
2096
Cédric Le Goaterb895de52018-05-14 08:57:00 +02002097 RAMBLOCK_FOREACH_MIGRATABLE(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01002098 PostcopyDiscardState *pds =
2099 postcopy_discard_send_init(ms, block->idstr);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002100
2101 /*
2102 * Postcopy sends chunks of bitmap over the wire, but it
2103 * just needs indexes at this point, avoids it having
2104 * target page specific code.
2105 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002106 ret = postcopy_send_discard_bm_ram(ms, pds, block);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002107 postcopy_discard_send_finish(ms, pds);
2108 if (ret) {
2109 return ret;
2110 }
2111 }
2112
2113 return 0;
2114}
2115
Juan Quintela3d0684b2017-03-23 15:06:39 +01002116/**
 2117 * postcopy_chunk_hostpages_pass: canonicalize bitmap in host pages
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002118 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002119 * Helper for postcopy_chunk_hostpages; it's called twice to
 2120 * canonicalize the two bitmaps, which are similar, but one is
2121 * inverted.
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002122 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002123 * Postcopy requires that all target pages in a hostpage are dirty or
2124 * clean, not a mix. This function canonicalizes the bitmaps.
2125 *
2126 * @ms: current migration state
2127 * @unsent_pass: if true we need to canonicalize partially unsent host pages
2128 * otherwise we need to canonicalize partially dirty host pages
2129 * @block: block that contains the page we want to canonicalize
2130 * @pds: state for postcopy
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002131 */
2132static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
2133 RAMBlock *block,
2134 PostcopyDiscardState *pds)
2135{
Juan Quintela53518d92017-05-04 11:46:24 +02002136 RAMState *rs = ram_state;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002137 unsigned long *bitmap = block->bmap;
2138 unsigned long *unsentmap = block->unsentmap;
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00002139 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002140 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002141 unsigned long run_start;
2142
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00002143 if (block->page_size == TARGET_PAGE_SIZE) {
2144 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2145 return;
2146 }
2147
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002148 if (unsent_pass) {
2149 /* Find a sent page */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002150 run_start = find_next_zero_bit(unsentmap, pages, 0);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002151 } else {
2152 /* Find a dirty page */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002153 run_start = find_next_bit(bitmap, pages, 0);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002154 }
2155
Juan Quintela6b6712e2017-03-22 15:18:04 +01002156 while (run_start < pages) {
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002157 bool do_fixup = false;
2158 unsigned long fixup_start_addr;
2159 unsigned long host_offset;
2160
2161 /*
2162 * If the start of this run of pages is in the middle of a host
2163 * page, then we need to fixup this host page.
2164 */
2165 host_offset = run_start % host_ratio;
2166 if (host_offset) {
2167 do_fixup = true;
2168 run_start -= host_offset;
2169 fixup_start_addr = run_start;
2170 /* For the next pass */
2171 run_start = run_start + host_ratio;
2172 } else {
2173 /* Find the end of this run */
2174 unsigned long run_end;
2175 if (unsent_pass) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01002176 run_end = find_next_bit(unsentmap, pages, run_start + 1);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002177 } else {
Juan Quintela6b6712e2017-03-22 15:18:04 +01002178 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002179 }
2180 /*
2181 * If the end isn't at the start of a host page, then the
2182 * run doesn't finish at the end of a host page
2183 * and we need to discard.
2184 */
2185 host_offset = run_end % host_ratio;
2186 if (host_offset) {
2187 do_fixup = true;
2188 fixup_start_addr = run_end - host_offset;
2189 /*
2190 * This host page has gone, the next loop iteration starts
2191 * from after the fixup
2192 */
2193 run_start = fixup_start_addr + host_ratio;
2194 } else {
2195 /*
2196 * No discards on this iteration, next loop starts from
2197 * next sent/dirty page
2198 */
2199 run_start = run_end + 1;
2200 }
2201 }
2202
2203 if (do_fixup) {
2204 unsigned long page;
2205
2206 /* Tell the destination to discard this page */
2207 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
2208 /* For the unsent_pass we:
2209 * discard partially sent pages
2210 * For the !unsent_pass (dirty) we:
2211 * discard partially dirty pages that were sent
2212 * (any partially sent pages were already discarded
2213 * by the previous unsent_pass)
2214 */
2215 postcopy_discard_send_range(ms, pds, fixup_start_addr,
2216 host_ratio);
2217 }
2218
2219 /* Clean up the bitmap */
2220 for (page = fixup_start_addr;
2221 page < fixup_start_addr + host_ratio; page++) {
2222 /* All pages in this host page are now not sent */
2223 set_bit(page, unsentmap);
2224
2225 /*
2226 * Remark them as dirty, updating the count for any pages
2227 * that weren't previously dirty.
2228 */
Juan Quintela0d8ec882017-03-13 21:21:41 +01002229 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002230 }
2231 }
2232
2233 if (unsent_pass) {
2234 /* Find the next sent page for the next iteration */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002235 run_start = find_next_zero_bit(unsentmap, pages, run_start);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002236 } else {
2237 /* Find the next dirty page for the next iteration */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002238 run_start = find_next_bit(bitmap, pages, run_start);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002239 }
2240 }
2241}
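/*
 * Worked example (assuming host_ratio == 4, i.e. 16KiB host pages with
 * 4KiB target pages): if a dirty (or unsent) run starts at target page 5
 * of the host page covering pages 4..7, the run is rounded out to the
 * host-page boundary, pages 4..7 are discarded on the destination and
 * re-marked unsent + dirty here, so every host page ends up either
 * all-dirty or all-clean.
 */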
2242
Juan Quintela3d0684b2017-03-23 15:06:39 +01002243/**
 2244 * postcopy_chunk_hostpages: discard any partially sent host page
2245 *
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002246 * Utility for the outgoing postcopy code.
2247 *
2248 * Discard any partially sent host-page size chunks, mark any partially
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00002249 * dirty host-page size chunks as all dirty. In this case the host-page
2250 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002251 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002252 * Returns zero on success
2253 *
2254 * @ms: current migration state
Juan Quintela6b6712e2017-03-22 15:18:04 +01002255 * @block: block we want to work with
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002256 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002257static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002258{
Juan Quintela6b6712e2017-03-22 15:18:04 +01002259 PostcopyDiscardState *pds =
2260 postcopy_discard_send_init(ms, block->idstr);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002261
Juan Quintela6b6712e2017-03-22 15:18:04 +01002262 /* First pass: Discard all partially sent host pages */
2263 postcopy_chunk_hostpages_pass(ms, true, block, pds);
2264 /*
2265 * Second pass: Ensure that all partially dirty host pages are made
2266 * fully dirty.
2267 */
2268 postcopy_chunk_hostpages_pass(ms, false, block, pds);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002269
Juan Quintela6b6712e2017-03-22 15:18:04 +01002270 postcopy_discard_send_finish(ms, pds);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002271 return 0;
2272}
2273
Juan Quintela3d0684b2017-03-23 15:06:39 +01002274/**
2275 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
2276 *
2277 * Returns zero on success
2278 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002279 * Transmit the set of pages to be discarded after precopy to the target
2280 * these are pages that:
2281 * a) Have been previously transmitted but are now dirty again
2282 * b) Pages that have never been transmitted, this ensures that
2283 * any pages on the destination that have been mapped by background
2284 * tasks get discarded (transparent huge pages is the specific concern)
2285 * Hopefully this is pretty sparse
Juan Quintela3d0684b2017-03-23 15:06:39 +01002286 *
2287 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002288 */
2289int ram_postcopy_send_discard_bitmap(MigrationState *ms)
2290{
Juan Quintela53518d92017-05-04 11:46:24 +02002291 RAMState *rs = ram_state;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002292 RAMBlock *block;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002293 int ret;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002294
2295 rcu_read_lock();
2296
2297 /* This should be our last sync, the src is now paused */
Juan Quintelaeb859c52017-03-13 21:51:55 +01002298 migration_bitmap_sync(rs);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002299
Juan Quintela6b6712e2017-03-22 15:18:04 +01002300 /* Easiest way to make sure we don't resume in the middle of a host-page */
2301 rs->last_seen_block = NULL;
2302 rs->last_sent_block = NULL;
2303 rs->last_page = 0;
2304
Cédric Le Goaterb895de52018-05-14 08:57:00 +02002305 RAMBLOCK_FOREACH_MIGRATABLE(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01002306 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
2307 unsigned long *bitmap = block->bmap;
2308 unsigned long *unsentmap = block->unsentmap;
2309
2310 if (!unsentmap) {
2311 /* We don't have a safe way to resize the sentmap, so
2312 * if the bitmap was resized it will be NULL at this
2313 * point.
2314 */
2315 error_report("migration ram resized during precopy phase");
2316 rcu_read_unlock();
2317 return -EINVAL;
2318 }
2319 /* Deal with TPS != HPS and huge pages */
2320 ret = postcopy_chunk_hostpages(ms, block);
2321 if (ret) {
2322 rcu_read_unlock();
2323 return ret;
2324 }
2325
2326 /*
2327 * Update the unsentmap to be unsentmap = unsentmap | dirty
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002328 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002329 bitmap_or(unsentmap, unsentmap, bitmap, pages);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002330#ifdef DEBUG_POSTCOPY
Juan Quintela6b6712e2017-03-22 15:18:04 +01002331 ram_debug_dump_bitmap(unsentmap, true, pages);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002332#endif
Juan Quintela6b6712e2017-03-22 15:18:04 +01002333 }
2334 trace_ram_postcopy_send_discard_bitmap();
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002335
2336 ret = postcopy_each_ram_send_discard(ms);
2337 rcu_read_unlock();
2338
2339 return ret;
2340}
2341
Juan Quintela3d0684b2017-03-23 15:06:39 +01002342/**
2343 * ram_discard_range: discard dirtied pages at the beginning of postcopy
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002344 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002345 * Returns zero on success
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002346 *
Juan Quintela36449152017-03-23 15:11:59 +01002347 * @rbname: name of the RAMBlock of the request. NULL means the
 2348 * same as the last one.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002349 * @start: byte offset within the RAMBlock
 2350 * @length: length in bytes to discard
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002351 */
Juan Quintelaaaa20642017-03-21 11:35:24 +01002352int ram_discard_range(const char *rbname, uint64_t start, size_t length)
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002353{
2354 int ret = -1;
2355
Juan Quintela36449152017-03-23 15:11:59 +01002356 trace_ram_discard_range(rbname, start, length);
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00002357
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002358 rcu_read_lock();
Juan Quintela36449152017-03-23 15:11:59 +01002359 RAMBlock *rb = qemu_ram_block_by_name(rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002360
2361 if (!rb) {
Juan Quintela36449152017-03-23 15:11:59 +01002362 error_report("ram_discard_range: Failed to find block '%s'", rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002363 goto err;
2364 }
2365
Alexey Perevalovf9494612017-10-05 14:13:20 +03002366 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2367 length >> qemu_target_page_bits());
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00002368 ret = ram_block_discard_range(rb, start, length);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002369
2370err:
2371 rcu_read_unlock();
2372
2373 return ret;
2374}
2375
Peter Xu84593a02017-10-19 14:31:59 +08002376/*
2377 * For every allocation, we will try not to crash the VM if the
 2378 * allocation fails.
2379 */
2380static int xbzrle_init(void)
2381{
2382 Error *local_err = NULL;
2383
2384 if (!migrate_use_xbzrle()) {
2385 return 0;
2386 }
2387
2388 XBZRLE_cache_lock();
2389
2390 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2391 if (!XBZRLE.zero_target_page) {
2392 error_report("%s: Error allocating zero page", __func__);
2393 goto err_out;
2394 }
2395
2396 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2397 TARGET_PAGE_SIZE, &local_err);
2398 if (!XBZRLE.cache) {
2399 error_report_err(local_err);
2400 goto free_zero_page;
2401 }
2402
2403 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2404 if (!XBZRLE.encoded_buf) {
2405 error_report("%s: Error allocating encoded_buf", __func__);
2406 goto free_cache;
2407 }
2408
2409 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2410 if (!XBZRLE.current_buf) {
2411 error_report("%s: Error allocating current_buf", __func__);
2412 goto free_encoded_buf;
2413 }
2414
2415 /* We are all good */
2416 XBZRLE_cache_unlock();
2417 return 0;
2418
2419free_encoded_buf:
2420 g_free(XBZRLE.encoded_buf);
2421 XBZRLE.encoded_buf = NULL;
2422free_cache:
2423 cache_fini(XBZRLE.cache);
2424 XBZRLE.cache = NULL;
2425free_zero_page:
2426 g_free(XBZRLE.zero_target_page);
2427 XBZRLE.zero_target_page = NULL;
2428err_out:
2429 XBZRLE_cache_unlock();
2430 return -ENOMEM;
2431}
2432
Juan Quintela53518d92017-05-04 11:46:24 +02002433static int ram_state_init(RAMState **rsp)
Juan Quintela56e93d22015-05-07 19:33:31 +02002434{
Peter Xu7d00ee62017-10-19 14:31:57 +08002435 *rsp = g_try_new0(RAMState, 1);
2436
2437 if (!*rsp) {
2438 error_report("%s: Init ramstate fail", __func__);
2439 return -1;
2440 }
Juan Quintela53518d92017-05-04 11:46:24 +02002441
2442 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2443 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2444 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
Juan Quintela56e93d22015-05-07 19:33:31 +02002445
Peter Xu7d00ee62017-10-19 14:31:57 +08002446 /*
2447 * Count the total number of pages used by ram blocks not including any
2448 * gaps due to alignment or unplugs.
2449 */
2450 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
2451
2452 ram_state_reset(*rsp);
2453
2454 return 0;
2455}
2456
Peter Xud6eff5d2017-10-19 14:32:00 +08002457static void ram_list_init_bitmaps(void)
2458{
2459 RAMBlock *block;
2460 unsigned long pages;
2461
2462 /* Skip setting bitmap if there is no RAM */
2463 if (ram_bytes_total()) {
Cédric Le Goaterb895de52018-05-14 08:57:00 +02002464 RAMBLOCK_FOREACH_MIGRATABLE(block) {
Peter Xud6eff5d2017-10-19 14:32:00 +08002465 pages = block->max_length >> TARGET_PAGE_BITS;
2466 block->bmap = bitmap_new(pages);
2467 bitmap_set(block->bmap, 0, pages);
2468 if (migrate_postcopy_ram()) {
2469 block->unsentmap = bitmap_new(pages);
2470 bitmap_set(block->unsentmap, 0, pages);
2471 }
2472 }
2473 }
2474}
2475
2476static void ram_init_bitmaps(RAMState *rs)
2477{
2478 /* For memory_global_dirty_log_start below. */
2479 qemu_mutex_lock_iothread();
2480 qemu_mutex_lock_ramlist();
2481 rcu_read_lock();
2482
2483 ram_list_init_bitmaps();
2484 memory_global_dirty_log_start();
2485 migration_bitmap_sync(rs);
2486
2487 rcu_read_unlock();
2488 qemu_mutex_unlock_ramlist();
2489 qemu_mutex_unlock_iothread();
2490}
2491
Peter Xu7d00ee62017-10-19 14:31:57 +08002492static int ram_init_all(RAMState **rsp)
2493{
Peter Xu7d00ee62017-10-19 14:31:57 +08002494 if (ram_state_init(rsp)) {
2495 return -1;
2496 }
2497
Peter Xu84593a02017-10-19 14:31:59 +08002498 if (xbzrle_init()) {
2499 ram_state_cleanup(rsp);
2500 return -1;
Juan Quintela56e93d22015-05-07 19:33:31 +02002501 }
2502
Peter Xud6eff5d2017-10-19 14:32:00 +08002503 ram_init_bitmaps(*rsp);
zhanghailianga91246c2016-10-27 14:42:59 +08002504
2505 return 0;
2506}
2507
Peter Xu08614f32018-05-02 18:47:33 +08002508static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
2509{
2510 RAMBlock *block;
2511 uint64_t pages = 0;
2512
2513 /*
2514 * Postcopy is not using xbzrle/compression, so no need for that.
 2515 * Also, since the source is already halted, we don't need to care
 2516 * about dirty page logging either.
2517 */
2518
2519 RAMBLOCK_FOREACH(block) {
2520 pages += bitmap_count_one(block->bmap,
2521 block->used_length >> TARGET_PAGE_BITS);
2522 }
2523
2524 /* This may not be aligned with current bitmaps. Recalculate. */
2525 rs->migration_dirty_pages = pages;
2526
2527 rs->last_seen_block = NULL;
2528 rs->last_sent_block = NULL;
2529 rs->last_page = 0;
2530 rs->last_version = ram_list.version;
2531 /*
2532 * Disable the bulk stage, otherwise we'll resend the whole RAM no
2533 * matter what we have sent.
2534 */
2535 rs->ram_bulk_stage = false;
2536
2537 /* Update RAMState cache of output QEMUFile */
2538 rs->f = out;
2539
2540 trace_ram_state_resume_prepare(pages);
2541}
2542
Juan Quintela3d0684b2017-03-23 15:06:39 +01002543/*
2544 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
zhanghailianga91246c2016-10-27 14:42:59 +08002545 * a long-running RCU critical section. When RCU reclaims in the code
2546 * start to become numerous it will be necessary to reduce the
2547 * granularity of these critical sections.
2548 */
2549
Juan Quintela3d0684b2017-03-23 15:06:39 +01002550/**
2551 * ram_save_setup: Setup RAM for migration
2552 *
2553 * Returns zero to indicate success and negative for error
2554 *
2555 * @f: QEMUFile where to send the data
2556 * @opaque: RAMState pointer
2557 */
zhanghailianga91246c2016-10-27 14:42:59 +08002558static int ram_save_setup(QEMUFile *f, void *opaque)
2559{
Juan Quintela53518d92017-05-04 11:46:24 +02002560 RAMState **rsp = opaque;
zhanghailianga91246c2016-10-27 14:42:59 +08002561 RAMBlock *block;
2562
Xiao Guangrongdcaf4462018-03-30 15:51:20 +08002563 if (compress_threads_save_setup()) {
2564 return -1;
2565 }
2566
zhanghailianga91246c2016-10-27 14:42:59 +08002567 /* migration has already setup the bitmap, reuse it. */
2568 if (!migration_in_colo_state()) {
Peter Xu7d00ee62017-10-19 14:31:57 +08002569 if (ram_init_all(rsp) != 0) {
Xiao Guangrongdcaf4462018-03-30 15:51:20 +08002570 compress_threads_save_cleanup();
zhanghailianga91246c2016-10-27 14:42:59 +08002571 return -1;
Juan Quintela53518d92017-05-04 11:46:24 +02002572 }
zhanghailianga91246c2016-10-27 14:42:59 +08002573 }
Juan Quintela53518d92017-05-04 11:46:24 +02002574 (*rsp)->f = f;
zhanghailianga91246c2016-10-27 14:42:59 +08002575
2576 rcu_read_lock();
Juan Quintela56e93d22015-05-07 19:33:31 +02002577
2578 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
2579
Cédric Le Goaterb895de52018-05-14 08:57:00 +02002580 RAMBLOCK_FOREACH_MIGRATABLE(block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002581 qemu_put_byte(f, strlen(block->idstr));
2582 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2583 qemu_put_be64(f, block->used_length);
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00002584 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
2585 qemu_put_be64(f, block->page_size);
2586 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002587 }
2588
2589 rcu_read_unlock();
2590
2591 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2592 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2593
2594 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2595
2596 return 0;
2597}
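/*
 * Setup-stage stream layout as written by ram_save_setup() above (field
 * order taken directly from the qemu_put_* calls):
 *
 *   be64  ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
 *   for each migratable RAMBlock:
 *     u8    strlen(idstr)
 *     bytes idstr (no NUL terminator)
 *     be64  used_length
 *     be64  page_size      only when postcopy-ram is enabled and the
 *                          block's page size differs from the host's
 *   be64  RAM_SAVE_FLAG_EOS
 *
 * ram_load() below parses exactly this in its RAM_SAVE_FLAG_MEM_SIZE case.
 */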
2598
Juan Quintela3d0684b2017-03-23 15:06:39 +01002599/**
2600 * ram_save_iterate: iterative stage for migration
2601 *
2602 * Returns zero to indicate success and negative for error
2603 *
2604 * @f: QEMUFile where to send the data
2605 * @opaque: RAMState pointer
2606 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002607static int ram_save_iterate(QEMUFile *f, void *opaque)
2608{
Juan Quintela53518d92017-05-04 11:46:24 +02002609 RAMState **temp = opaque;
2610 RAMState *rs = *temp;
Juan Quintela56e93d22015-05-07 19:33:31 +02002611 int ret;
2612 int i;
2613 int64_t t0;
Thomas Huth5c903082016-11-04 14:10:17 +01002614 int done = 0;
Juan Quintela56e93d22015-05-07 19:33:31 +02002615
Peter Lievenb2557342018-03-08 12:18:24 +01002616 if (blk_mig_bulk_active()) {
2617        /* Avoid transferring RAM during the bulk phase of block migration,
2618         * as the bulk phase will usually take a long time and transferring
2619         * RAM updates during that time is pointless. */
2620 goto out;
2621 }
2622
Juan Quintela56e93d22015-05-07 19:33:31 +02002623 rcu_read_lock();
Juan Quintela6f37bb82017-03-13 19:26:29 +01002624 if (ram_list.version != rs->last_version) {
2625 ram_state_reset(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002626 }
2627
2628 /* Read version before ram_list.blocks */
2629 smp_rmb();
2630
2631 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2632
2633 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2634 i = 0;
2635 while ((ret = qemu_file_rate_limit(f)) == 0) {
2636 int pages;
2637
Juan Quintelace25d332017-03-15 11:00:51 +01002638 pages = ram_find_and_save_block(rs, false);
Juan Quintela56e93d22015-05-07 19:33:31 +02002639        /* no more pages to send */
2640 if (pages == 0) {
Thomas Huth5c903082016-11-04 14:10:17 +01002641 done = 1;
Juan Quintela56e93d22015-05-07 19:33:31 +02002642 break;
2643 }
Juan Quintela23b28c32017-03-13 20:51:34 +01002644 rs->iterations++;
Jason J. Herne070afca2015-09-08 13:12:35 -04002645
Juan Quintela56e93d22015-05-07 19:33:31 +02002646 /* we want to check in the 1st loop, just in case it was the 1st time
2647 and we had to sync the dirty bitmap.
2648           qemu_clock_get_ns() is a bit expensive, so we only check once
2649           every few iterations
2650 */
2651 if ((i & 63) == 0) {
2652 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2653 if (t1 > MAX_WAIT) {
Juan Quintela55c44462017-01-23 22:32:05 +01002654 trace_ram_save_iterate_big_wait(t1, i);
Juan Quintela56e93d22015-05-07 19:33:31 +02002655 break;
2656 }
2657 }
2658 i++;
2659 }
Juan Quintelace25d332017-03-15 11:00:51 +01002660 flush_compressed_data(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002661 rcu_read_unlock();
2662
2663 /*
2664 * Must occur before EOS (or any QEMUFile operation)
2665 * because of RDMA protocol.
2666 */
2667 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2668
Peter Lievenb2557342018-03-08 12:18:24 +01002669out:
Juan Quintela56e93d22015-05-07 19:33:31 +02002670 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
Juan Quintela93604472017-06-06 19:49:03 +02002671 ram_counters.transferred += 8;
Juan Quintela56e93d22015-05-07 19:33:31 +02002672
2673 ret = qemu_file_get_error(f);
2674 if (ret < 0) {
2675 return ret;
2676 }
2677
Thomas Huth5c903082016-11-04 14:10:17 +01002678 return done;
Juan Quintela56e93d22015-05-07 19:33:31 +02002679}
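/*
 * The iteration loop above is bounded twice over: qemu_file_rate_limit()
 * ends it once the bandwidth budget for this cycle is used up, and the
 * (i & 63) == 0 time check ends it once more than MAX_WAIT milliseconds
 * have passed (the nanosecond delta is divided by 1000000 before the
 * comparison).  A minimal sketch of the shape, with the page work elided:
 *
 *   t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
 *   while (qemu_file_rate_limit(f) == 0) {
 *       if (ram_find_and_save_block(rs, false) == 0) {
 *           break;                          // nothing dirty this round
 *       }
 *       if ((i & 63) == 0 &&
 *           (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000 > MAX_WAIT) {
 *           break;                          // spent too long in one call
 *       }
 *       i++;
 *   }
 */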
2680
Juan Quintela3d0684b2017-03-23 15:06:39 +01002681/**
2682 * ram_save_complete: function called to send the remaining amount of ram
2683 *
2684 * Returns zero to indicate success
2685 *
2686 * Called with iothread lock
2687 *
2688 * @f: QEMUFile where to send the data
2689 * @opaque: RAMState pointer
2690 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002691static int ram_save_complete(QEMUFile *f, void *opaque)
2692{
Juan Quintela53518d92017-05-04 11:46:24 +02002693 RAMState **temp = opaque;
2694 RAMState *rs = *temp;
Juan Quintela6f37bb82017-03-13 19:26:29 +01002695
Juan Quintela56e93d22015-05-07 19:33:31 +02002696 rcu_read_lock();
2697
Juan Quintela57273092017-03-20 22:25:28 +01002698 if (!migration_in_postcopy()) {
Juan Quintela8d820d62017-03-13 19:35:50 +01002699 migration_bitmap_sync(rs);
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002700 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002701
2702 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2703
2704 /* try transferring iterative blocks of memory */
2705
2706 /* flush all remaining blocks regardless of rate limiting */
2707 while (true) {
2708 int pages;
2709
Juan Quintelace25d332017-03-15 11:00:51 +01002710 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
Juan Quintela56e93d22015-05-07 19:33:31 +02002711        /* no more blocks to send */
2712 if (pages == 0) {
2713 break;
2714 }
2715 }
2716
Juan Quintelace25d332017-03-15 11:00:51 +01002717 flush_compressed_data(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002718 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
Juan Quintela56e93d22015-05-07 19:33:31 +02002719
2720 rcu_read_unlock();
Paolo Bonzinid09a6fd2015-07-09 08:47:58 +02002721
Juan Quintela56e93d22015-05-07 19:33:31 +02002722 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2723
2724 return 0;
2725}
2726
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002727static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
Vladimir Sementsov-Ogievskiy47995022018-03-13 15:34:00 -04002728 uint64_t *res_precopy_only,
2729 uint64_t *res_compatible,
2730 uint64_t *res_postcopy_only)
Juan Quintela56e93d22015-05-07 19:33:31 +02002731{
Juan Quintela53518d92017-05-04 11:46:24 +02002732 RAMState **temp = opaque;
2733 RAMState *rs = *temp;
Juan Quintela56e93d22015-05-07 19:33:31 +02002734 uint64_t remaining_size;
2735
Juan Quintela9edabd42017-03-14 12:02:16 +01002736 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02002737
Juan Quintela57273092017-03-20 22:25:28 +01002738 if (!migration_in_postcopy() &&
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002739 remaining_size < max_size) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002740 qemu_mutex_lock_iothread();
2741 rcu_read_lock();
Juan Quintela8d820d62017-03-13 19:35:50 +01002742 migration_bitmap_sync(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002743 rcu_read_unlock();
2744 qemu_mutex_unlock_iothread();
Juan Quintela9edabd42017-03-14 12:02:16 +01002745 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02002746 }
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002747
Vladimir Sementsov-Ogievskiy86e11672017-07-10 19:30:15 +03002748 if (migrate_postcopy_ram()) {
2749 /* We can do postcopy, and all the data is postcopiable */
Vladimir Sementsov-Ogievskiy47995022018-03-13 15:34:00 -04002750 *res_compatible += remaining_size;
Vladimir Sementsov-Ogievskiy86e11672017-07-10 19:30:15 +03002751 } else {
Vladimir Sementsov-Ogievskiy47995022018-03-13 15:34:00 -04002752 *res_precopy_only += remaining_size;
Vladimir Sementsov-Ogievskiy86e11672017-07-10 19:30:15 +03002753 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002754}
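/*
 * Illustrative note on the split above, assuming 4 KiB target pages: with
 * 1000 dirty pages remaining, 4096000 bytes are added to *res_compatible
 * when postcopy-ram is enabled (the data can be sent in either phase) or
 * to *res_precopy_only otherwise; RAM never contributes to
 * *res_postcopy_only.
 */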
2755
2756static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2757{
2758 unsigned int xh_len;
2759 int xh_flags;
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002760 uint8_t *loaded_data;
Juan Quintela56e93d22015-05-07 19:33:31 +02002761
Juan Quintela56e93d22015-05-07 19:33:31 +02002762 /* extract RLE header */
2763 xh_flags = qemu_get_byte(f);
2764 xh_len = qemu_get_be16(f);
2765
2766 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2767 error_report("Failed to load XBZRLE page - wrong compression!");
2768 return -1;
2769 }
2770
2771 if (xh_len > TARGET_PAGE_SIZE) {
2772 error_report("Failed to load XBZRLE page - len overflow!");
2773 return -1;
2774 }
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002775 loaded_data = XBZRLE.decoded_buf;
Juan Quintela56e93d22015-05-07 19:33:31 +02002776 /* load data and decode */
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002777 /* it can change loaded_data to point to an internal buffer */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002778 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002779
2780 /* decode RLE */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002781 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
Juan Quintela56e93d22015-05-07 19:33:31 +02002782 TARGET_PAGE_SIZE) == -1) {
2783 error_report("Failed to load XBZRLE page - decode error!");
2784 return -1;
2785 }
2786
2787 return 0;
2788}
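/*
 * XBZRLE page record as consumed by load_xbzrle() above (widths follow
 * the qemu_get_* calls):
 *
 *   u8    xh_flags   must be ENCODING_FLAG_XBZRLE
 *   be16  xh_len     encoded length, at most TARGET_PAGE_SIZE
 *   bytes xh_len encoded bytes, decoded in place on top of the current
 *         contents of @host, since XBZRLE only carries the delta against
 *         the copy of the page cached on the sending side
 */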
2789
Juan Quintela3d0684b2017-03-23 15:06:39 +01002790/**
2791 * ram_block_from_stream: read a RAMBlock id from the migration stream
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002792 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002793 * Must be called from within a rcu critical section.
2794 *
2795 * Returns a pointer from within the RCU-protected ram_list.
2796 *
2797 * @f: QEMUFile where to read the data from
2798 * @flags: Page flags (mostly to see if it's a continuation of previous block)
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002799 */
Juan Quintela3d0684b2017-03-23 15:06:39 +01002800static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
Juan Quintela56e93d22015-05-07 19:33:31 +02002801{
2802 static RAMBlock *block = NULL;
2803 char id[256];
2804 uint8_t len;
2805
2806 if (flags & RAM_SAVE_FLAG_CONTINUE) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08002807 if (!block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002808 error_report("Ack, bad migration stream!");
2809 return NULL;
2810 }
zhanghailiang4c4bad42016-01-15 11:37:41 +08002811 return block;
Juan Quintela56e93d22015-05-07 19:33:31 +02002812 }
2813
2814 len = qemu_get_byte(f);
2815 qemu_get_buffer(f, (uint8_t *)id, len);
2816 id[len] = 0;
2817
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002818 block = qemu_ram_block_by_name(id);
zhanghailiang4c4bad42016-01-15 11:37:41 +08002819 if (!block) {
2820 error_report("Can't find block %s", id);
2821 return NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002822 }
2823
Cédric Le Goaterb895de52018-05-14 08:57:00 +02002824 if (!qemu_ram_is_migratable(block)) {
2825 error_report("block %s should not be migrated !", id);
2826 return NULL;
2827 }
2828
zhanghailiang4c4bad42016-01-15 11:37:41 +08002829 return block;
2830}
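/*
 * On the wire a block reference is either a full id or a continuation:
 * with RAM_SAVE_FLAG_CONTINUE set no id bytes follow and the previously
 * decoded block (cached in the static @block above) is reused; otherwise
 * a u8 length and that many id bytes are read and resolved with
 * qemu_ram_block_by_name().  A hypothetical fragment, using the common
 * "pc.ram" block name purely as an example:
 *
 *   06 "pc.ram"   first page of the block carries the full id
 *   ...           later pages set CONTINUE and carry no id bytes
 */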
2831
2832static inline void *host_from_ram_block_offset(RAMBlock *block,
2833 ram_addr_t offset)
2834{
2835 if (!offset_in_ramblock(block, offset)) {
2836 return NULL;
2837 }
2838
2839 return block->host + offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02002840}
2841
Juan Quintela3d0684b2017-03-23 15:06:39 +01002842/**
2843 * ram_handle_compressed: handle the zero page case
2844 *
Juan Quintela56e93d22015-05-07 19:33:31 +02002845 * If a page (or a whole RDMA chunk) has been
2846 * determined to be zero, then zap it.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002847 *
2848 * @host: host address for the zero page
2849 * @ch: what the page is filled from. We only support zero
2850 * @size: size of the zero page
Juan Quintela56e93d22015-05-07 19:33:31 +02002851 */
2852void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2853{
2854 if (ch != 0 || !is_zero_range(host, size)) {
2855 memset(host, ch, size);
2856 }
2857}
2858
Xiao Guangrong797ca152018-03-30 15:51:21 +08002859/* return the size after decompression, or a negative value on error */
2860static int
2861qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
2862 const uint8_t *source, size_t source_len)
2863{
2864 int err;
2865
2866 err = inflateReset(stream);
2867 if (err != Z_OK) {
2868 return -1;
2869 }
2870
2871 stream->avail_in = source_len;
2872 stream->next_in = (uint8_t *)source;
2873 stream->avail_out = dest_len;
2874 stream->next_out = dest;
2875
2876 err = inflate(stream, Z_NO_FLUSH);
2877 if (err != Z_STREAM_END) {
2878 return -1;
2879 }
2880
2881 return stream->total_out;
2882}
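/*
 * Usage sketch for the helper above, mirroring how do_data_decompress()
 * below drives it: each decompress thread owns one z_stream, primed once
 * with inflateInit() in compress_threads_load_setup(), and the
 * inflateReset() here lets that state be reused per page instead of
 * paying for inflateInit()/inflateEnd() every time.
 *
 *   ret = qemu_uncompress_data(&param->stream, des, TARGET_PAGE_SIZE,
 *                              param->compbuf, len);
 *   if (ret < 0) {
 *       ... compressed input was corrupt or truncated ...
 *   }
 */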
2883
Juan Quintela56e93d22015-05-07 19:33:31 +02002884static void *do_data_decompress(void *opaque)
2885{
2886 DecompressParam *param = opaque;
2887 unsigned long pagesize;
Liang Li33d151f2016-05-05 15:32:58 +08002888 uint8_t *des;
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002889 int len, ret;
Juan Quintela56e93d22015-05-07 19:33:31 +02002890
Liang Li33d151f2016-05-05 15:32:58 +08002891 qemu_mutex_lock(&param->mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002892 while (!param->quit) {
Liang Li33d151f2016-05-05 15:32:58 +08002893 if (param->des) {
2894 des = param->des;
2895 len = param->len;
2896 param->des = 0;
2897 qemu_mutex_unlock(&param->mutex);
2898
Liang Li73a89122016-05-05 15:32:51 +08002899 pagesize = TARGET_PAGE_SIZE;
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002900
2901 ret = qemu_uncompress_data(&param->stream, des, pagesize,
2902 param->compbuf, len);
Xiao Guangrongf5482222018-05-03 16:06:11 +08002903 if (ret < 0 && migrate_get_current()->decompress_error_check) {
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002904 error_report("decompress data failed");
2905 qemu_file_set_error(decomp_file, ret);
2906 }
Liang Li73a89122016-05-05 15:32:51 +08002907
Liang Li33d151f2016-05-05 15:32:58 +08002908 qemu_mutex_lock(&decomp_done_lock);
2909 param->done = true;
2910 qemu_cond_signal(&decomp_done_cond);
2911 qemu_mutex_unlock(&decomp_done_lock);
2912
2913 qemu_mutex_lock(&param->mutex);
2914 } else {
2915 qemu_cond_wait(&param->cond, &param->mutex);
2916 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002917 }
Liang Li33d151f2016-05-05 15:32:58 +08002918 qemu_mutex_unlock(&param->mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002919
2920 return NULL;
2921}
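/*
 * Hand-off between the feeder and a worker, matching the locking in
 * do_data_decompress() above and decompress_data_with_multi_threads()
 * below: the feeder picks an idle thread (param->done), fills
 * param->compbuf, publishes des/len under param->mutex and signals
 * param->cond; the worker inflates into the destination page, then flips
 * param->done back under decomp_done_lock and signals decomp_done_cond so
 * the feeder or wait_for_decompress_done() can see the completion.
 */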
2922
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002923static int wait_for_decompress_done(void)
Liang Li5533b2e2016-05-05 15:32:52 +08002924{
2925 int idx, thread_count;
2926
2927 if (!migrate_use_compression()) {
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002928 return 0;
Liang Li5533b2e2016-05-05 15:32:52 +08002929 }
2930
2931 thread_count = migrate_decompress_threads();
2932 qemu_mutex_lock(&decomp_done_lock);
2933 for (idx = 0; idx < thread_count; idx++) {
2934 while (!decomp_param[idx].done) {
2935 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2936 }
2937 }
2938 qemu_mutex_unlock(&decomp_done_lock);
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002939 return qemu_file_get_error(decomp_file);
Liang Li5533b2e2016-05-05 15:32:52 +08002940}
2941
Juan Quintelaf0afa332017-06-28 11:52:28 +02002942static void compress_threads_load_cleanup(void)
Juan Quintela56e93d22015-05-07 19:33:31 +02002943{
2944 int i, thread_count;
2945
Juan Quintela3416ab52016-04-20 11:56:01 +02002946 if (!migrate_use_compression()) {
2947 return;
2948 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002949 thread_count = migrate_decompress_threads();
2950 for (i = 0; i < thread_count; i++) {
Xiao Guangrong797ca152018-03-30 15:51:21 +08002951 /*
2952         * we use it as an indicator of whether the thread is
2953         * properly initialized or not
2954 */
2955 if (!decomp_param[i].compbuf) {
2956 break;
2957 }
2958
Juan Quintela56e93d22015-05-07 19:33:31 +02002959 qemu_mutex_lock(&decomp_param[i].mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002960 decomp_param[i].quit = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02002961 qemu_cond_signal(&decomp_param[i].cond);
2962 qemu_mutex_unlock(&decomp_param[i].mutex);
2963 }
2964 for (i = 0; i < thread_count; i++) {
Xiao Guangrong797ca152018-03-30 15:51:21 +08002965 if (!decomp_param[i].compbuf) {
2966 break;
2967 }
2968
Juan Quintela56e93d22015-05-07 19:33:31 +02002969 qemu_thread_join(decompress_threads + i);
2970 qemu_mutex_destroy(&decomp_param[i].mutex);
2971 qemu_cond_destroy(&decomp_param[i].cond);
Xiao Guangrong797ca152018-03-30 15:51:21 +08002972 inflateEnd(&decomp_param[i].stream);
Juan Quintela56e93d22015-05-07 19:33:31 +02002973 g_free(decomp_param[i].compbuf);
Xiao Guangrong797ca152018-03-30 15:51:21 +08002974 decomp_param[i].compbuf = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002975 }
2976 g_free(decompress_threads);
2977 g_free(decomp_param);
Juan Quintela56e93d22015-05-07 19:33:31 +02002978 decompress_threads = NULL;
2979 decomp_param = NULL;
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002980 decomp_file = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002981}
2982
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002983static int compress_threads_load_setup(QEMUFile *f)
Xiao Guangrong797ca152018-03-30 15:51:21 +08002984{
2985 int i, thread_count;
2986
2987 if (!migrate_use_compression()) {
2988 return 0;
2989 }
2990
2991 thread_count = migrate_decompress_threads();
2992 decompress_threads = g_new0(QemuThread, thread_count);
2993 decomp_param = g_new0(DecompressParam, thread_count);
2994 qemu_mutex_init(&decomp_done_lock);
2995 qemu_cond_init(&decomp_done_cond);
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002996 decomp_file = f;
Xiao Guangrong797ca152018-03-30 15:51:21 +08002997 for (i = 0; i < thread_count; i++) {
2998 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
2999 goto exit;
3000 }
3001
3002 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3003 qemu_mutex_init(&decomp_param[i].mutex);
3004 qemu_cond_init(&decomp_param[i].cond);
3005 decomp_param[i].done = true;
3006 decomp_param[i].quit = false;
3007 qemu_thread_create(decompress_threads + i, "decompress",
3008 do_data_decompress, decomp_param + i,
3009 QEMU_THREAD_JOINABLE);
3010 }
3011 return 0;
3012exit:
3013 compress_threads_load_cleanup();
3014 return -1;
3015}
3016
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00003017static void decompress_data_with_multi_threads(QEMUFile *f,
Juan Quintela56e93d22015-05-07 19:33:31 +02003018 void *host, int len)
3019{
3020 int idx, thread_count;
3021
3022 thread_count = migrate_decompress_threads();
Liang Li73a89122016-05-05 15:32:51 +08003023 qemu_mutex_lock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02003024 while (true) {
3025 for (idx = 0; idx < thread_count; idx++) {
Liang Li73a89122016-05-05 15:32:51 +08003026 if (decomp_param[idx].done) {
Liang Li33d151f2016-05-05 15:32:58 +08003027 decomp_param[idx].done = false;
3028 qemu_mutex_lock(&decomp_param[idx].mutex);
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00003029 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02003030 decomp_param[idx].des = host;
3031 decomp_param[idx].len = len;
Liang Li33d151f2016-05-05 15:32:58 +08003032 qemu_cond_signal(&decomp_param[idx].cond);
3033 qemu_mutex_unlock(&decomp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02003034 break;
3035 }
3036 }
3037 if (idx < thread_count) {
3038 break;
Liang Li73a89122016-05-05 15:32:51 +08003039 } else {
3040 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02003041 }
3042 }
Liang Li73a89122016-05-05 15:32:51 +08003043 qemu_mutex_unlock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02003044}
3045
Juan Quintela3d0684b2017-03-23 15:06:39 +01003046/**
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003047 * ram_load_setup: Setup RAM for migration incoming side
3048 *
3049 * Returns zero to indicate success and negative for error
3050 *
3051 * @f: QEMUFile where to receive the data
3052 * @opaque: RAMState pointer
3053 */
3054static int ram_load_setup(QEMUFile *f, void *opaque)
3055{
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003056 if (compress_threads_load_setup(f)) {
Xiao Guangrong797ca152018-03-30 15:51:21 +08003057 return -1;
3058 }
3059
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003060 xbzrle_load_setup();
Alexey Perevalovf9494612017-10-05 14:13:20 +03003061 ramblock_recv_map_init();
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003062 return 0;
3063}
3064
3065static int ram_load_cleanup(void *opaque)
3066{
Alexey Perevalovf9494612017-10-05 14:13:20 +03003067 RAMBlock *rb;
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003068 xbzrle_load_cleanup();
Juan Quintelaf0afa332017-06-28 11:52:28 +02003069 compress_threads_load_cleanup();
Alexey Perevalovf9494612017-10-05 14:13:20 +03003070
Cédric Le Goaterb895de52018-05-14 08:57:00 +02003071 RAMBLOCK_FOREACH_MIGRATABLE(rb) {
Alexey Perevalovf9494612017-10-05 14:13:20 +03003072 g_free(rb->receivedmap);
3073 rb->receivedmap = NULL;
3074 }
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003075 return 0;
3076}
3077
3078/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01003079 * ram_postcopy_incoming_init: allocate postcopy data structures
3080 *
3081 * Returns 0 for success and negative if there was one error
3082 *
3083 * @mis: current migration incoming state
3084 *
3085 * Allocate the data structures etc. needed by incoming migration with
3086 * postcopy-ram. postcopy-ram's similarly named
3087 * postcopy_ram_incoming_init does the work.
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00003088 */
3089int ram_postcopy_incoming_init(MigrationIncomingState *mis)
3090{
Juan Quintelab8c48992017-03-21 17:44:30 +01003091 unsigned long ram_pages = last_ram_page();
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00003092
3093 return postcopy_ram_incoming_init(mis, ram_pages);
3094}
3095
Juan Quintela3d0684b2017-03-23 15:06:39 +01003096/**
3097 * ram_load_postcopy: load a page in postcopy case
3098 *
3099 * Returns 0 for success or -errno in case of error
3100 *
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003101 * Called in postcopy mode by ram_load().
3102 * rcu_read_lock is taken prior to this being called.
Juan Quintela3d0684b2017-03-23 15:06:39 +01003103 *
3104 * @f: QEMUFile where to send the data
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003105 */
3106static int ram_load_postcopy(QEMUFile *f)
3107{
3108 int flags = 0, ret = 0;
3109 bool place_needed = false;
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00003110 bool matching_page_sizes = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003111 MigrationIncomingState *mis = migration_incoming_get_current();
3112 /* Temporary page that is later 'placed' */
3113 void *postcopy_host_page = postcopy_get_tmp_page(mis);
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00003114 void *last_host = NULL;
Dr. David Alan Gilberta3b6ff62015-11-11 14:02:28 +00003115 bool all_zero = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003116
3117 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
3118 ram_addr_t addr;
3119 void *host = NULL;
3120 void *page_buffer = NULL;
3121 void *place_source = NULL;
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00003122 RAMBlock *block = NULL;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003123 uint8_t ch;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003124
3125 addr = qemu_get_be64(f);
Peter Xu7a9ddfb2018-02-08 18:31:05 +08003126
3127 /*
3128 * If qemu file error, we should stop here, and then "addr"
3129 * may be invalid
3130 */
3131 ret = qemu_file_get_error(f);
3132 if (ret) {
3133 break;
3134 }
3135
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003136 flags = addr & ~TARGET_PAGE_MASK;
3137 addr &= TARGET_PAGE_MASK;
3138
3139 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
3140 place_needed = false;
Juan Quintelabb890ed2017-04-28 09:39:55 +02003141 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00003142 block = ram_block_from_stream(f, flags);
zhanghailiang4c4bad42016-01-15 11:37:41 +08003143
3144 host = host_from_ram_block_offset(block, addr);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003145 if (!host) {
3146 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3147 ret = -EINVAL;
3148 break;
3149 }
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00003150 matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003151 /*
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00003152 * Postcopy requires that we place whole host pages atomically;
3153 * these may be huge pages for RAMBlocks that are backed by
3154 * hugetlbfs.
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003155 * To make it atomic, the data is read into a temporary page
3156 * that's moved into place later.
3157         * The migration protocol uses, possibly smaller, target pages;
3158         * however, the source ensures it always sends all the components
3159 * of a host page in order.
3160 */
3161 page_buffer = postcopy_host_page +
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00003162 ((uintptr_t)host & (block->page_size - 1));
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003163 /* If all TP are zero then we can optimise the place */
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00003164 if (!((uintptr_t)host & (block->page_size - 1))) {
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003165 all_zero = true;
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00003166 } else {
3167 /* not the 1st TP within the HP */
3168 if (host != (last_host + TARGET_PAGE_SIZE)) {
Markus Armbruster9af9e0f2015-12-18 16:35:19 +01003169 error_report("Non-sequential target page %p/%p",
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00003170 host, last_host);
3171 ret = -EINVAL;
3172 break;
3173 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003174 }
3175
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00003176
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003177 /*
3178 * If it's the last part of a host page then we place the host
3179 * page
3180 */
3181 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00003182 (block->page_size - 1)) == 0;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003183 place_source = postcopy_host_page;
3184 }
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00003185 last_host = host;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003186
3187 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
Juan Quintelabb890ed2017-04-28 09:39:55 +02003188 case RAM_SAVE_FLAG_ZERO:
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003189 ch = qemu_get_byte(f);
3190 memset(page_buffer, ch, TARGET_PAGE_SIZE);
3191 if (ch) {
3192 all_zero = false;
3193 }
3194 break;
3195
3196 case RAM_SAVE_FLAG_PAGE:
3197 all_zero = false;
3198 if (!place_needed || !matching_page_sizes) {
3199 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
3200 } else {
3201                /* Avoid the extra copy out of the qemu_file buffer: the
3202                 * data will be copied again when the page is placed anyway;
3203                 * this only works when the whole read is done in one go
3204                 * (matching page sizes)
3204 */
3205 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
3206 TARGET_PAGE_SIZE);
3207 }
3208 break;
3209 case RAM_SAVE_FLAG_EOS:
3210 /* normal exit */
3211 break;
3212 default:
3213 error_report("Unknown combination of migration flags: %#x"
3214 " (postcopy mode)", flags);
3215 ret = -EINVAL;
Peter Xu7a9ddfb2018-02-08 18:31:05 +08003216 break;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003217 }
3218
Peter Xu7a9ddfb2018-02-08 18:31:05 +08003219        /* Check for any possible file errors */
3220 if (!ret && qemu_file_get_error(f)) {
3221 ret = qemu_file_get_error(f);
3222 }
3223
3224 if (!ret && place_needed) {
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003225 /* This gets called at the last target page in the host page */
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00003226 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
3227
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003228 if (all_zero) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00003229 ret = postcopy_place_page_zero(mis, place_dest,
Alexey Perevalov8be46202017-10-05 14:13:18 +03003230 block);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003231 } else {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00003232 ret = postcopy_place_page(mis, place_dest,
Alexey Perevalov8be46202017-10-05 14:13:18 +03003233 place_source, block);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003234 }
3235 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003236 }
3237
3238 return ret;
3239}
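/*
 * Worked example of the host-page assembly above, under assumed sizes
 * (2 MiB hugepage backing, 4 KiB target pages): the 512 target pages that
 * make up one host page arrive in order, each is copied into
 * postcopy_host_page at its offset, and only once the last one has been
 * read is the whole 2 MiB placed atomically with postcopy_place_page()
 * (or postcopy_place_page_zero() when every target page was zero), so the
 * guest can never fault in a partially filled host page.
 */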
3240
Daniel Henrique Barbozaacab30b2017-11-16 20:35:26 -02003241static bool postcopy_is_advised(void)
3242{
3243 PostcopyState ps = postcopy_state_get();
3244 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
3245}
3246
3247static bool postcopy_is_running(void)
3248{
3249 PostcopyState ps = postcopy_state_get();
3250 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
3251}
3252
Juan Quintela56e93d22015-05-07 19:33:31 +02003253static int ram_load(QEMUFile *f, void *opaque, int version_id)
3254{
Juan Quintelaedc60122016-11-02 12:40:46 +01003255 int flags = 0, ret = 0, invalid_flags = 0;
Juan Quintela56e93d22015-05-07 19:33:31 +02003256 static uint64_t seq_iter;
3257 int len = 0;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003258 /*
3259 * If the system is running in postcopy mode, page inserts into host memory
3260 * must be atomic
3261 */
Daniel Henrique Barbozaacab30b2017-11-16 20:35:26 -02003262 bool postcopy_running = postcopy_is_running();
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00003263 /* ADVISE is earlier, it shows the source has the postcopy capability on */
Daniel Henrique Barbozaacab30b2017-11-16 20:35:26 -02003264 bool postcopy_advised = postcopy_is_advised();
Juan Quintela56e93d22015-05-07 19:33:31 +02003265
3266 seq_iter++;
3267
3268 if (version_id != 4) {
3269 ret = -EINVAL;
3270 }
3271
Juan Quintelaedc60122016-11-02 12:40:46 +01003272 if (!migrate_use_compression()) {
3273 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
3274 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003275 /* This RCU critical section can be very long running.
3276 * When RCU reclaims in the code start to become numerous,
3277 * it will be necessary to reduce the granularity of this
3278 * critical section.
3279 */
3280 rcu_read_lock();
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003281
3282 if (postcopy_running) {
3283 ret = ram_load_postcopy(f);
3284 }
3285
3286 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
Juan Quintela56e93d22015-05-07 19:33:31 +02003287 ram_addr_t addr, total_ram_bytes;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003288 void *host = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02003289 uint8_t ch;
3290
3291 addr = qemu_get_be64(f);
3292 flags = addr & ~TARGET_PAGE_MASK;
3293 addr &= TARGET_PAGE_MASK;
3294
Juan Quintelaedc60122016-11-02 12:40:46 +01003295 if (flags & invalid_flags) {
3296 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
3297 error_report("Received an unexpected compressed page");
3298 }
3299
3300 ret = -EINVAL;
3301 break;
3302 }
3303
Juan Quintelabb890ed2017-04-28 09:39:55 +02003304 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003305 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08003306 RAMBlock *block = ram_block_from_stream(f, flags);
3307
3308 host = host_from_ram_block_offset(block, addr);
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003309 if (!host) {
3310 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
3311 ret = -EINVAL;
3312 break;
3313 }
Alexey Perevalovf9494612017-10-05 14:13:20 +03003314 ramblock_recv_bitmap_set(block, host);
Dr. David Alan Gilbert1db9d8e2017-04-26 19:37:21 +01003315 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003316 }
3317
Juan Quintela56e93d22015-05-07 19:33:31 +02003318 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
3319 case RAM_SAVE_FLAG_MEM_SIZE:
3320 /* Synchronize RAM block list */
3321 total_ram_bytes = addr;
3322 while (!ret && total_ram_bytes) {
3323 RAMBlock *block;
Juan Quintela56e93d22015-05-07 19:33:31 +02003324 char id[256];
3325 ram_addr_t length;
3326
3327 len = qemu_get_byte(f);
3328 qemu_get_buffer(f, (uint8_t *)id, len);
3329 id[len] = 0;
3330 length = qemu_get_be64(f);
3331
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00003332 block = qemu_ram_block_by_name(id);
Cédric Le Goaterb895de52018-05-14 08:57:00 +02003333 if (block && !qemu_ram_is_migratable(block)) {
3334 error_report("block %s should not be migrated !", id);
3335 ret = -EINVAL;
3336 } else if (block) {
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00003337 if (length != block->used_length) {
3338 Error *local_err = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02003339
Gongleifa53a0e2016-05-10 10:04:59 +08003340 ret = qemu_ram_resize(block, length,
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00003341 &local_err);
3342 if (local_err) {
3343 error_report_err(local_err);
Juan Quintela56e93d22015-05-07 19:33:31 +02003344 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003345 }
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00003346 /* For postcopy we need to check hugepage sizes match */
3347 if (postcopy_advised &&
3348 block->page_size != qemu_host_page_size) {
3349 uint64_t remote_page_size = qemu_get_be64(f);
3350 if (remote_page_size != block->page_size) {
3351 error_report("Mismatched RAM page size %s "
3352 "(local) %zd != %" PRId64,
3353 id, block->page_size,
3354 remote_page_size);
3355 ret = -EINVAL;
3356 }
3357 }
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00003358 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
3359 block->idstr);
3360 } else {
Juan Quintela56e93d22015-05-07 19:33:31 +02003361 error_report("Unknown ramblock \"%s\", cannot "
3362 "accept migration", id);
3363 ret = -EINVAL;
3364 }
3365
3366 total_ram_bytes -= length;
3367 }
3368 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003369
Juan Quintelabb890ed2017-04-28 09:39:55 +02003370 case RAM_SAVE_FLAG_ZERO:
Juan Quintela56e93d22015-05-07 19:33:31 +02003371 ch = qemu_get_byte(f);
3372 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
3373 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003374
Juan Quintela56e93d22015-05-07 19:33:31 +02003375 case RAM_SAVE_FLAG_PAGE:
Juan Quintela56e93d22015-05-07 19:33:31 +02003376 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
3377 break;
Juan Quintela56e93d22015-05-07 19:33:31 +02003378
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003379 case RAM_SAVE_FLAG_COMPRESS_PAGE:
Juan Quintela56e93d22015-05-07 19:33:31 +02003380 len = qemu_get_be32(f);
3381 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
3382 error_report("Invalid compressed data length: %d", len);
3383 ret = -EINVAL;
3384 break;
3385 }
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00003386 decompress_data_with_multi_threads(f, host, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02003387 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00003388
Juan Quintela56e93d22015-05-07 19:33:31 +02003389 case RAM_SAVE_FLAG_XBZRLE:
Juan Quintela56e93d22015-05-07 19:33:31 +02003390 if (load_xbzrle(f, addr, host) < 0) {
3391 error_report("Failed to decompress XBZRLE page at "
3392 RAM_ADDR_FMT, addr);
3393 ret = -EINVAL;
3394 break;
3395 }
3396 break;
3397 case RAM_SAVE_FLAG_EOS:
3398 /* normal exit */
3399 break;
3400 default:
3401 if (flags & RAM_SAVE_FLAG_HOOK) {
Dr. David Alan Gilbert632e3a52015-06-11 18:17:23 +01003402 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
Juan Quintela56e93d22015-05-07 19:33:31 +02003403 } else {
3404 error_report("Unknown combination of migration flags: %#x",
3405 flags);
3406 ret = -EINVAL;
3407 }
3408 }
3409 if (!ret) {
3410 ret = qemu_file_get_error(f);
3411 }
3412 }
3413
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003414 ret |= wait_for_decompress_done();
Juan Quintela56e93d22015-05-07 19:33:31 +02003415 rcu_read_unlock();
Juan Quintela55c44462017-01-23 22:32:05 +01003416 trace_ram_load_complete(ret, seq_iter);
Juan Quintela56e93d22015-05-07 19:33:31 +02003417 return ret;
3418}
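/*
 * Shape of one record in the precopy load loop above (widths follow the
 * qemu_get_* calls; the flag bits live in the low bits of the page
 * address):
 *
 *   be64  addr | flags
 *   for the page-carrying flags (ZERO, PAGE, COMPRESS_PAGE, XBZRLE):
 *     block id (u8 length + bytes) unless RAM_SAVE_FLAG_CONTINUE is set
 *   then, per flag:
 *     RAM_SAVE_FLAG_ZERO           u8 fill byte (only zero is supported)
 *     RAM_SAVE_FLAG_PAGE           TARGET_PAGE_SIZE raw bytes
 *     RAM_SAVE_FLAG_COMPRESS_PAGE  be32 length + compressed bytes
 *     RAM_SAVE_FLAG_XBZRLE         the record described at load_xbzrle()
 *     RAM_SAVE_FLAG_MEM_SIZE       the block table from ram_save_setup()
 *     RAM_SAVE_FLAG_EOS            end of this section
 */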
3419
Vladimir Sementsov-Ogievskiyc6467622017-07-10 19:30:14 +03003420static bool ram_has_postcopy(void *opaque)
3421{
3422 return migrate_postcopy_ram();
3423}
3424
Peter Xuedd090c2018-05-02 18:47:32 +08003425/* Sync all the dirty bitmap with destination VM. */
3426static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
3427{
3428 RAMBlock *block;
3429 QEMUFile *file = s->to_dst_file;
3430 int ramblock_count = 0;
3431
3432 trace_ram_dirty_bitmap_sync_start();
3433
3434 RAMBLOCK_FOREACH(block) {
3435 qemu_savevm_send_recv_bitmap(file, block->idstr);
3436 trace_ram_dirty_bitmap_request(block->idstr);
3437 ramblock_count++;
3438 }
3439
3440 trace_ram_dirty_bitmap_sync_wait();
3441
3442 /* Wait until all the ramblocks' dirty bitmap synced */
3443 while (ramblock_count--) {
3444 qemu_sem_wait(&s->rp_state.rp_sem);
3445 }
3446
3447 trace_ram_dirty_bitmap_sync_complete();
3448
3449 return 0;
3450}
3451
3452static void ram_dirty_bitmap_reload_notify(MigrationState *s)
3453{
3454 qemu_sem_post(&s->rp_state.rp_sem);
3455}
3456
Peter Xua335deb2018-05-02 18:47:28 +08003457/*
3458 * Read the received bitmap, revert it as the initial dirty bitmap.
3459 * This is only used when the postcopy migration is paused but wants
3460 * to resume from a middle point.
3461 */
3462int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
3463{
3464 int ret = -EINVAL;
3465 QEMUFile *file = s->rp_state.from_dst_file;
3466 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
3467 uint64_t local_size = nbits / 8;
3468 uint64_t size, end_mark;
3469
3470 trace_ram_dirty_bitmap_reload_begin(block->idstr);
3471
3472 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
3473 error_report("%s: incorrect state %s", __func__,
3474 MigrationStatus_str(s->state));
3475 return -EINVAL;
3476 }
3477
3478 /*
3479 * Note: see comments in ramblock_recv_bitmap_send() on why we
3480     * need the endianness conversion, and the padding.
3481 */
3482 local_size = ROUND_UP(local_size, 8);
3483
3484 /* Add paddings */
3485 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
3486
3487 size = qemu_get_be64(file);
3488
3489 /* The size of the bitmap should match with our ramblock */
3490 if (size != local_size) {
3491 error_report("%s: ramblock '%s' bitmap size mismatch "
3492 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
3493 block->idstr, size, local_size);
3494 ret = -EINVAL;
3495 goto out;
3496 }
3497
3498 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
3499 end_mark = qemu_get_be64(file);
3500
3501 ret = qemu_file_get_error(file);
3502 if (ret || size != local_size) {
3503 error_report("%s: read bitmap failed for ramblock '%s': %d"
3504 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
3505 __func__, block->idstr, ret, local_size, size);
3506 ret = -EIO;
3507 goto out;
3508 }
3509
3510 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
3511 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
3512 __func__, block->idstr, end_mark);
3513 ret = -EINVAL;
3514 goto out;
3515 }
3516
3517 /*
3518     * Endianness conversion. We are in postcopy (though paused).
3519     * The dirty bitmap won't change, so we can modify it directly.
3520 */
3521 bitmap_from_le(block->bmap, le_bitmap, nbits);
3522
3523 /*
3524 * What we received is "received bitmap". Revert it as the initial
3525 * dirty bitmap for this ramblock.
3526 */
3527 bitmap_complement(block->bmap, block->bmap, nbits);
3528
3529 trace_ram_dirty_bitmap_reload_complete(block->idstr);
3530
Peter Xuedd090c2018-05-02 18:47:32 +08003531 /*
3532     * We succeeded in syncing the bitmap for the current ramblock. If this is
3533 * the last one to sync, we need to notify the main send thread.
3534 */
3535 ram_dirty_bitmap_reload_notify(s);
3536
Peter Xua335deb2018-05-02 18:47:28 +08003537 ret = 0;
3538out:
Peter Xubf269902018-05-25 09:50:42 +08003539 g_free(le_bitmap);
Peter Xua335deb2018-05-02 18:47:28 +08003540 return ret;
3541}
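/*
 * Layout of the per-ramblock bitmap message consumed above, as produced
 * by ramblock_recv_bitmap_send() on the destination side:
 *
 *   be64   size        bitmap size in bytes, i.e. ROUND_UP(nbits / 8, 8)
 *   bytes  size bytes   little-endian bitmap of pages already received
 *   be64   end mark     must equal RAMBLOCK_RECV_BITMAP_ENDING
 *
 * The bitmap is then complemented: pages the destination already holds
 * become clean and everything else becomes the initial dirty set for the
 * resumed postcopy migration.
 */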
3542
Peter Xuedd090c2018-05-02 18:47:32 +08003543static int ram_resume_prepare(MigrationState *s, void *opaque)
3544{
3545 RAMState *rs = *(RAMState **)opaque;
Peter Xu08614f32018-05-02 18:47:33 +08003546 int ret;
Peter Xuedd090c2018-05-02 18:47:32 +08003547
Peter Xu08614f32018-05-02 18:47:33 +08003548 ret = ram_dirty_bitmap_sync_all(s, rs);
3549 if (ret) {
3550 return ret;
3551 }
3552
3553 ram_state_resume_prepare(rs, s->to_dst_file);
3554
3555 return 0;
Peter Xuedd090c2018-05-02 18:47:32 +08003556}
3557
Juan Quintela56e93d22015-05-07 19:33:31 +02003558static SaveVMHandlers savevm_ram_handlers = {
Juan Quintela9907e842017-06-28 11:52:24 +02003559 .save_setup = ram_save_setup,
Juan Quintela56e93d22015-05-07 19:33:31 +02003560 .save_live_iterate = ram_save_iterate,
Dr. David Alan Gilbert763c9062015-11-05 18:11:00 +00003561 .save_live_complete_postcopy = ram_save_complete,
Dr. David Alan Gilberta3e06c32015-11-05 18:10:41 +00003562 .save_live_complete_precopy = ram_save_complete,
Vladimir Sementsov-Ogievskiyc6467622017-07-10 19:30:14 +03003563 .has_postcopy = ram_has_postcopy,
Juan Quintela56e93d22015-05-07 19:33:31 +02003564 .save_live_pending = ram_save_pending,
3565 .load_state = ram_load,
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003566 .save_cleanup = ram_save_cleanup,
3567 .load_setup = ram_load_setup,
3568 .load_cleanup = ram_load_cleanup,
Peter Xuedd090c2018-05-02 18:47:32 +08003569 .resume_prepare = ram_resume_prepare,
Juan Quintela56e93d22015-05-07 19:33:31 +02003570};
3571
3572void ram_mig_init(void)
3573{
3574 qemu_mutex_init(&XBZRLE.lock);
Juan Quintela6f37bb82017-03-13 19:26:29 +01003575 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
Juan Quintela56e93d22015-05-07 19:33:31 +02003576}
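/*
 * Registration note: the 4 passed to register_savevm_live() above is the
 * section version that ram_load() insists on (anything else is rejected
 * with -EINVAL), and &ram_state is what every handler receives back as
 * @opaque, which is why the save/load callbacks all begin by
 * dereferencing a RAMState **.
 */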