/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "cpu.h"
#include <zlib.h>
#include "qapi-event.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "xbzrle.h"
#include "ram.h"
#include "migration.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "migration/block.h"

/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value.  And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

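/*
 * Illustrative sketch (added comment, not part of the original file):
 * the flags above share the low bits of the 64-bit value written by
 * save_page_header(), since page offsets are TARGET_PAGE_SIZE aligned
 * and their low bits are always zero.  A receiver can split the two
 * apart roughly like this:
 *
 *     uint64_t word = qemu_get_be64(f);
 *     ram_addr_t addr = word & TARGET_PAGE_MASK;
 *     int flags = word & ~TARGET_PAGE_MASK;
 */
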
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by the XBZRLE.lock mutex.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    /* Cache should not be larger than guest ram size */
    if (new_size > ram_bytes_total()) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeds guest ram size");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}

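/*
 * Hypothetical caller sketch (added comment; per the doc comment above,
 * the real caller is qmp_migrate_set_cache_size).  error_report_err()
 * consumes the error on failure:
 *
 *     Error *err = NULL;
 *     if (xbzrle_cache_resize(64 * 1024 * 1024, &err) < 0) {
 *         error_report_err(err);
 *     }
 */
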
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have found too many dirty pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* number of iterations at the beginning of period */
    uint64_t iterations_prev;
    /* Iterations since start */
    uint64_t iterations;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(src_page_requests, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

uint64_t ram_bytes_remaining(void)
{
    return ram_state->migration_dirty_pages * TARGET_PAGE_SIZE;
}

MigrationStats ram_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset);

static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, block, offset);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

static void compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}

/* Multiple fd's */

struct MultiFDSendParams {
    uint8_t id;
    char *name;
    QemuThread thread;
    QemuSemaphore sem;
    QemuMutex mutex;
    bool quit;
};
typedef struct MultiFDSendParams MultiFDSendParams;

struct {
    MultiFDSendParams *params;
    /* number of created threads */
    int count;
} *multifd_send_state;

static void terminate_multifd_send_threads(Error *errp)
{
    int i;

    for (i = 0; i < multifd_send_state->count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_save_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    terminate_multifd_send_threads(NULL);
    for (i = 0; i < multifd_send_state->count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_thread_join(&p->thread);
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        g_free(p->name);
        p->name = NULL;
    }
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
    return ret;
}

static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;

    while (true) {
        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_wait(&p->sem);
    }

    return NULL;
}

int multifd_save_setup(void)
{
    int thread_count;
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
    multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
    multifd_send_state->count = 0;
    for (i = 0; i < thread_count; i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        p->quit = false;
        p->id = i;
        p->name = g_strdup_printf("multifdsend_%d", i);
        qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
                           QEMU_THREAD_JOINABLE);

        multifd_send_state->count++;
    }
    return 0;
}

struct MultiFDRecvParams {
    uint8_t id;
    char *name;
    QemuThread thread;
    QemuSemaphore sem;
    QemuMutex mutex;
    bool quit;
};
typedef struct MultiFDRecvParams MultiFDRecvParams;

struct {
    MultiFDRecvParams *params;
    /* number of created threads */
    int count;
} *multifd_recv_state;

static void terminate_multifd_recv_threads(Error *errp)
{
    int i;

    for (i = 0; i < multifd_recv_state->count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

int multifd_load_cleanup(Error **errp)
{
    int i;
    int ret = 0;

    if (!migrate_use_multifd()) {
        return 0;
    }
    terminate_multifd_recv_threads(NULL);
    for (i = 0; i < multifd_recv_state->count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_thread_join(&p->thread);
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        g_free(p->name);
        p->name = NULL;
    }
    g_free(multifd_recv_state->params);
    multifd_recv_state->params = NULL;
    g_free(multifd_recv_state);
    multifd_recv_state = NULL;

    return ret;
}

static void *multifd_recv_thread(void *opaque)
{
    MultiFDRecvParams *p = opaque;

    while (true) {
        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        }
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_wait(&p->sem);
    }

    return NULL;
}

int multifd_load_setup(void)
{
    int thread_count;
    uint8_t i;

    if (!migrate_use_multifd()) {
        return 0;
    }
    thread_count = migrate_multifd_channels();
    multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
    multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
    multifd_recv_state->count = 0;
    for (i = 0; i < thread_count; i++) {
        MultiFDRecvParams *p = &multifd_recv_state->params[i];

        qemu_mutex_init(&p->mutex);
        qemu_sem_init(&p->sem, 0);
        p->quit = false;
        p->id = i;
        p->name = g_strdup_printf("multifdrecv_%d", i);
        qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
                           QEMU_THREAD_JOINABLE);
        multifd_recv_state->count++;
    }
    return 0;
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns the number of bytes written
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
                               ram_addr_t offset)
{
    size_t size, len;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
        rs->last_sent_block = block;
    }
    return size;
}

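/*
 * Illustrative wire layout produced by save_page_header() (sketch added
 * for clarity, not part of the original file):
 *
 *     8 bytes  big-endian page offset, RAM_SAVE_FLAG_* OR-ed into low bits
 *     1 byte   idstr length   \  only present when RAM_SAVE_FLAG_CONTINUE
 *     n bytes  block idstr    /  is not set
 */
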
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
    }
}

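/*
 * Hypothetical example (added comment), assuming the usual defaults of a
 * 20% initial throttle and 10% increments (both are tunable migration
 * parameters): successive calls pin vcpus at 20%, then 30%, 40%, ... of
 * their execution time until the dirty rate drops below the transfer rate.
 */
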
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
                 ram_counters.dirty_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr,
                         ram_counters.dirty_sync_count)) {
        xbzrle_counters.cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             ram_counters.dirty_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        xbzrle_counters.overflow++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(rs, rs->f, block,
                                    offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(rs->f, encoded_len);
    qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    xbzrle_counters.pages++;
    xbzrle_counters.bytes += bytes_xbzrle;
    ram_counters.transferred += bytes_xbzrle;

    return 1;
}

/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: page where we start the search
 */
static inline
unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                          unsigned long start)
{
    unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
    unsigned long *bitmap = rb->bmap;
    unsigned long next;

    if (rs->ram_bulk_stage && start > 0) {
        next = start + 1;
    } else {
        next = find_next_bit(bitmap, size, start);
    }

    return next;
}

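/*
 * Note (added comment): during the bulk stage every page is still dirty,
 * so the "start + 1" shortcut above skips the bitmap scan entirely; once
 * the first full round completes, find_dirty_block() clears
 * ram_bulk_stage and the real find_next_bit() search takes over.
 */
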
static inline bool migration_bitmap_clear_dirty(RAMState *rs,
                                                RAMBlock *rb,
                                                unsigned long page)
{
    bool ret;

    ret = test_and_clear_bit(page, rb->bmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    return ret;
}

static void migration_bitmap_sync_range(RAMState *rs, RAMBlock *rb,
                                        ram_addr_t start, ram_addr_t length)
{
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(rb, start, length,
                                              &rs->num_dirty_pages_period);
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    RAMBLOCK_FOREACH(block) {
        summary |= block->page_size;
    }

    return summary;
}

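/*
 * Worked example (added comment): a guest with ordinary 4 KiB pages plus
 * one 2 MiB hugepage-backed block yields 0x1000 | 0x200000 == 0x201000,
 * so both page sizes can be recovered from the single summary value.
 */
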
static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    int64_t end_time;
    uint64_t bytes_xfer_now;

    ram_counters.dirty_sync_count++;

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    RAMBLOCK_FOREACH(block) {
        migration_bitmap_sync_range(rs, block, 0, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        /* calculate period counters */
        ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
            / (end_time - rs->time_last_bitmap_sync);
        bytes_xfer_now = ram_counters.transferred;

        /* During block migration the auto-converge logic incorrectly detects
         * that ram migration makes no progress. Avoid this by disabling the
         * throttling logic during the bulk phase of block migration. */
        if (migrate_auto_converge() && !blk_mig_bulk_active()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are 50% more than the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens twice, start or
               increase throttling. */

            if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (++rs->dirty_rate_high_cnt >= 2)) {
                trace_migration_throttle();
                rs->dirty_rate_high_cnt = 0;
                mig_throttle_guest_down();
            }
        }

        if (migrate_use_xbzrle()) {
            if (rs->iterations_prev != rs->iterations) {
                xbzrle_counters.cache_miss_rate =
                    (double)(xbzrle_counters.cache_miss -
                             rs->xbzrle_cache_miss_prev) /
                    (rs->iterations - rs->iterations_prev);
            }
            rs->iterations_prev = rs->iterations;
            rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
        }

        /* reset period counters */
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
        rs->bytes_xfer_prev = bytes_xfer_now;
    }
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(ram_counters.dirty_sync_count, NULL);
    }
}

/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 */
static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        ram_counters.duplicate++;
        ram_counters.transferred +=
            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_ZERO);
        qemu_put_byte(rs->f, 0);
        ram_counters.transferred += 1;
        pages = 1;
    }

    return pages;
}

static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy()) {
        return;
    }

    ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
}

/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;

    p = block->host + offset;
    trace_ram_save_page(block->idstr, (uint64_t)offset, p);

    /* If in doubt, send the page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(rs->f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                ram_counters.normal++;
            } else if (bytes_xmit == 0) {
                ram_counters.duplicate++;
            }
        }
    } else {
        pages = save_zero_page(rs, block, offset, p);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(rs, current_addr);
            ram_release_pages(block->idstr, offset, pages);
        } else if (!rs->ram_bulk_stage &&
                   !migration_in_postcopy() && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(rs, &p, current_addr, block,
                                     offset, last_stage);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        ram_counters.transferred +=
            save_page_header(rs, rs->f, block, offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(rs->f, p, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &&
                                  migration_in_postcopy());
        } else {
            qemu_put_buffer(rs->f, p, TARGET_PAGE_SIZE);
        }
        ram_counters.transferred += TARGET_PAGE_SIZE;
        pages = 1;
        ram_counters.normal++;
    }

    XBZRLE_cache_unlock();

    return pages;
}

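/*
 * Decision ladder used above (summary comment, added for clarity): a
 * transport-specific hook (e.g. RDMA) gets first shot via
 * ram_control_save_page(); failing that, the page is tried as a zero
 * page, then as an XBZRLE delta against the cache, and finally sent as
 * a full copy with RAM_SAVE_FLAG_PAGE.
 */
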
static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset)
{
    RAMState *rs = ram_state;
    int bytes_sent, blen;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(rs, f, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    if (blen < 0) {
        bytes_sent = 0;
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
        ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
    }

    return bytes_sent;
}

static void flush_compressed_data(RAMState *rs)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
            ram_counters.transferred += len;
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}

static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
                                           ram_addr_t offset)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                comp_param[idx].done = false;
                bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
                qemu_mutex_lock(&comp_param[idx].mutex);
                set_compress_params(&comp_param[idx], block, offset);
                qemu_cond_signal(&comp_param[idx].cond);
                qemu_mutex_unlock(&comp_param[idx].mutex);
                pages = 1;
                ram_counters.normal++;
                ram_counters.transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}

/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_compressed_page(RAMState *rs, PageSearchStatus *pss,
                                    bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit = 0;
    uint8_t *p;
    int ret, blen;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->page << TARGET_PAGE_BITS;

    p = block->host + offset;

    ret = ram_control_save_page(rs->f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        ram_counters.transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                ram_counters.normal++;
            } else if (bytes_xmit == 0) {
                ram_counters.duplicate++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in the last block should have been
         * sent out.  Keeping this order is important, because the 'cont'
         * flag is used to avoid resending the block name.
         */
        if (block != rs->last_sent_block) {
            flush_compressed_data(rs);
            pages = save_zero_page(rs, block, offset, p);
            if (pages == -1) {
                /* Make sure the first page is sent out before other pages */
                bytes_xmit = save_page_header(rs, rs->f, block, offset |
                                              RAM_SAVE_FLAG_COMPRESS_PAGE);
                blen = qemu_put_compression_data(rs->f, p, TARGET_PAGE_SIZE,
                                                 migrate_compress_level());
                if (blen > 0) {
                    ram_counters.transferred += bytes_xmit + blen;
                    ram_counters.normal++;
                    pages = 1;
                } else {
                    qemu_file_set_error(rs->f, blen);
                    error_report("compressed data failed!");
                }
            }
            if (pages > 0) {
                ram_release_pages(block->idstr, offset, pages);
            }
        } else {
            pages = save_zero_page(rs, block, offset, p);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(rs, block, offset);
            } else {
                ram_release_pages(block->idstr, offset, pages);
            }
        }
    }

    return pages;
}

/**
 * find_dirty_block: find the next dirty page and update any state
 *                   associated with the search process.
 *
 * Returns whether a page is found
 *
 * @rs: current RAM state
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 */
static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
{
    pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->page >= rs->last_page) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->page = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(rs);
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}

Juan Quintela3d0684b2017-03-23 15:06:39 +01001251/**
 1252 * unqueue_page: gets a page off the queue
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001253 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001254 * Helper for 'get_queued_page' - gets a page off the queue
1255 *
1256 * Returns the block of the page (or NULL if none available)
1257 *
Juan Quintelaec481c62017-03-20 22:12:40 +01001258 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001259 * @offset: used to return the offset within the RAMBlock
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001260 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001261static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001262{
1263 RAMBlock *block = NULL;
1264
Juan Quintelaec481c62017-03-20 22:12:40 +01001265 qemu_mutex_lock(&rs->src_page_req_mutex);
1266 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
1267 struct RAMSrcPageRequest *entry =
1268 QSIMPLEQ_FIRST(&rs->src_page_requests);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001269 block = entry->rb;
1270 *offset = entry->offset;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001271
1272 if (entry->len > TARGET_PAGE_SIZE) {
1273 entry->len -= TARGET_PAGE_SIZE;
1274 entry->offset += TARGET_PAGE_SIZE;
1275 } else {
1276 memory_region_unref(block->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01001277 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001278 g_free(entry);
1279 }
1280 }
Juan Quintelaec481c62017-03-20 22:12:40 +01001281 qemu_mutex_unlock(&rs->src_page_req_mutex);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001282
1283 return block;
1284}
1285
Juan Quintela3d0684b2017-03-23 15:06:39 +01001286/**
 1287 * get_queued_page: unqueue a page from the postcopy requests
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001288 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001289 * Skips pages that are already sent (!dirty)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001290 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001291 * Returns true if a queued page is found
1292 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001293 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001294 * @pss: data about the state of the current dirty page scan
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001295 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001296static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001297{
1298 RAMBlock *block;
1299 ram_addr_t offset;
1300 bool dirty;
1301
1302 do {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001303 block = unqueue_page(rs, &offset);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001304 /*
1305 * We're sending this page, and since it's postcopy nothing else
1306 * will dirty it, and we must make sure it doesn't get sent again
1307 * even if this queue request was received after the background
1308 * search already sent it.
1309 */
1310 if (block) {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001311 unsigned long page;
1312
Juan Quintela6b6712e2017-03-22 15:18:04 +01001313 page = offset >> TARGET_PAGE_BITS;
1314 dirty = test_bit(page, block->bmap);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001315 if (!dirty) {
Juan Quintela06b10682017-03-21 15:18:05 +01001316 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
Juan Quintela6b6712e2017-03-22 15:18:04 +01001317 page, test_bit(page, block->unsentmap));
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001318 } else {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001319 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001320 }
1321 }
1322
1323 } while (block && !dirty);
1324
1325 if (block) {
1326 /*
 1327 * As soon as we start servicing pages out of order, we have
 1328 * to kill the bulk stage, since the bulk stage assumes
 1329 * (in migration_bitmap_find_dirty) that every page is
 1330 * dirty; that's no longer true.
1331 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001332 rs->ram_bulk_stage = false;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001333
1334 /*
1335 * We want the background search to continue from the queued page
1336 * since the guest is likely to want other pages near to the page
1337 * it just requested.
1338 */
1339 pss->block = block;
Juan Quintelaa935e302017-03-21 15:36:51 +01001340 pss->page = offset >> TARGET_PAGE_BITS;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001341 }
1342
1343 return !!block;
1344}
1345
Juan Quintela56e93d22015-05-07 19:33:31 +02001346/**
Juan Quintela5e58f962017-04-03 22:06:54 +02001347 * migration_page_queue_free: drop any remaining pages in the ram
1348 * request queue
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001349 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001350 * It should be empty at the end anyway, but in error cases there may
 1351 * be some left. In case any page is left, we drop it.
1352 *
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001353 */
Juan Quintela83c13382017-05-04 11:45:01 +02001354static void migration_page_queue_free(RAMState *rs)
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001355{
Juan Quintelaec481c62017-03-20 22:12:40 +01001356 struct RAMSrcPageRequest *mspr, *next_mspr;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001357 /* This queue generally should be empty - but in the case of a failed
1358 * migration might have some droppings in.
1359 */
1360 rcu_read_lock();
Juan Quintelaec481c62017-03-20 22:12:40 +01001361 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001362 memory_region_unref(mspr->rb->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01001363 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001364 g_free(mspr);
1365 }
1366 rcu_read_unlock();
1367}
1368
1369/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001370 * ram_save_queue_pages: queue the page for transmission
1371 *
1372 * A request from postcopy destination for example.
1373 *
1374 * Returns zero on success or negative on error
1375 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001376 * @rbname: Name of the RAMBlock of the request. NULL means the
 1377 * same as the last one.
1378 * @start: starting address from the start of the RAMBlock
1379 * @len: length (in bytes) to send
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001380 */
Juan Quintela96506892017-03-14 18:41:03 +01001381int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001382{
1383 RAMBlock *ramblock;
Juan Quintela53518d92017-05-04 11:46:24 +02001384 RAMState *rs = ram_state;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001385
Juan Quintela93604472017-06-06 19:49:03 +02001386 ram_counters.postcopy_requests++;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001387 rcu_read_lock();
1388 if (!rbname) {
1389 /* Reuse last RAMBlock */
Juan Quintela68a098f2017-03-14 13:48:42 +01001390 ramblock = rs->last_req_rb;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001391
1392 if (!ramblock) {
1393 /*
1394 * Shouldn't happen, we can't reuse the last RAMBlock if
1395 * it's the 1st request.
1396 */
1397 error_report("ram_save_queue_pages no previous block");
1398 goto err;
1399 }
1400 } else {
1401 ramblock = qemu_ram_block_by_name(rbname);
1402
1403 if (!ramblock) {
1404 /* We shouldn't be asked for a non-existent RAMBlock */
1405 error_report("ram_save_queue_pages no block '%s'", rbname);
1406 goto err;
1407 }
Juan Quintela68a098f2017-03-14 13:48:42 +01001408 rs->last_req_rb = ramblock;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001409 }
1410 trace_ram_save_queue_pages(ramblock->idstr, start, len);
 1411 if (start + len > ramblock->used_length) {
Juan Quintela9458ad62015-11-10 17:42:05 +01001412 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1413 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001414 __func__, start, len, ramblock->used_length);
1415 goto err;
1416 }
1417
Juan Quintelaec481c62017-03-20 22:12:40 +01001418 struct RAMSrcPageRequest *new_entry =
1419 g_malloc0(sizeof(struct RAMSrcPageRequest));
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001420 new_entry->rb = ramblock;
1421 new_entry->offset = start;
1422 new_entry->len = len;
1423
1424 memory_region_ref(ramblock->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01001425 qemu_mutex_lock(&rs->src_page_req_mutex);
1426 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
1427 qemu_mutex_unlock(&rs->src_page_req_mutex);
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001428 rcu_read_unlock();
1429
1430 return 0;
1431
1432err:
1433 rcu_read_unlock();
1434 return -1;
1435}
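/*
 * Usage sketch (illustrative, with made-up values): when the destination
 * faults on a not-yet-received page during postcopy it sends a page
 * request back to the source, which ends up calling something like
 *
 *     ram_save_queue_pages("pc.ram", 0x7c32000, TARGET_PAGE_SIZE);
 *
 * This appends one RAMSrcPageRequest to rs->src_page_requests, and
 * get_queued_page() later pops it ahead of the background dirty scan.
 */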
1436
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001437/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001438 * ram_save_target_page: save one target page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001439 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001440 * Returns the number of pages written
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001441 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001442 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001444 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001445 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001446 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01001447static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
Juan Quintelaf20e2862017-03-21 16:19:05 +01001448 bool last_stage)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001449{
1450 int res = 0;
1451
 1452 /* Check if the page is dirty and, if it is, send it */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001453 if (migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
Juan Quintela6d358d92017-03-16 21:29:34 +01001454 /*
 1455 * If xbzrle is on, stop using the data compression after the
 1456 * first round of migration even if compression is enabled. In theory,
1457 * xbzrle can do better than compression.
1458 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001459 if (migrate_use_compression() &&
1460 (rs->ram_bulk_stage || !migrate_use_xbzrle())) {
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01001461 res = ram_save_compressed_page(rs, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001462 } else {
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01001463 res = ram_save_page(rs, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001464 }
1465
1466 if (res < 0) {
1467 return res;
1468 }
Juan Quintela6b6712e2017-03-22 15:18:04 +01001469 if (pss->block->unsentmap) {
1470 clear_bit(pss->page, pss->block->unsentmap);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001471 }
1472 }
1473
1474 return res;
1475}
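/*
 * Decision summary added for clarity: with compression enabled, a dirty
 * page goes through ram_save_compressed_page() while we are still in the
 * bulk stage or when xbzrle is off; once xbzrle takes over after the
 * first round, ram_save_page() handles it (possibly via the xbzrle cache).
 */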
1476
1477/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001478 * ram_save_host_page: save a whole host page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001479 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001480 * Starting at pss->page send pages up to the end of the current host
 1481 * page. It's valid for the initial page to point into the middle of
 1482 * a host page, in which case the remainder of the host page is sent.
1483 * Only dirty target pages are sent. Note that the host page size may
1484 * be a huge page for this block.
Dr. David Alan Gilbert1eb3fc02017-05-17 17:58:09 +01001485 * The saving stops at the boundary of the used_length of the block
1486 * if the RAMBlock isn't a multiple of the host page size.
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001487 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001488 * Returns the number of pages written or negative on error
1489 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001490 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001492 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001493 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001494 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01001495static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
Juan Quintelaf20e2862017-03-21 16:19:05 +01001496 bool last_stage)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001497{
1498 int tmppages, pages = 0;
Juan Quintelaa935e302017-03-21 15:36:51 +01001499 size_t pagesize_bits =
1500 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
Dr. David Alan Gilbert4c011c32017-02-24 18:28:39 +00001501
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001502 do {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001503 tmppages = ram_save_target_page(rs, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001504 if (tmppages < 0) {
1505 return tmppages;
1506 }
1507
1508 pages += tmppages;
Juan Quintelaa935e302017-03-21 15:36:51 +01001509 pss->page++;
Dr. David Alan Gilbert1eb3fc02017-05-17 17:58:09 +01001510 } while ((pss->page & (pagesize_bits - 1)) &&
1511 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001512
1513 /* The offset we leave with is the last one we looked at */
Juan Quintelaa935e302017-03-21 15:36:51 +01001514 pss->page--;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001515 return pages;
1516}
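/*
 * Worked example (illustrative, not from the original source): for a
 * RAMBlock backed by 2MB huge pages with 4KB target pages,
 * qemu_ram_pagesize() returns 0x200000, so pagesize_bits is 512 and the
 * loop mask is 511. Starting at pss->page == 1000 the loop sends pages
 * 1000..1023, exits when (1024 & 511) == 0 at the host-page boundary,
 * and steps pss->page back to 1023, the last page looked at.
 */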
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001517
1518/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001519 * ram_find_and_save_block: finds a dirty page and sends it to f
Juan Quintela56e93d22015-05-07 19:33:31 +02001520 *
1521 * Called within an RCU critical section.
1522 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001523 * Returns the number of pages written where zero means no dirty pages
Juan Quintela56e93d22015-05-07 19:33:31 +02001524 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001525 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02001526 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001527 *
1528 * On systems where host-page-size > target-page-size it will send all the
1529 * pages in a host page that are dirty.
Juan Quintela56e93d22015-05-07 19:33:31 +02001530 */
1531
Juan Quintelace25d332017-03-15 11:00:51 +01001532static int ram_find_and_save_block(RAMState *rs, bool last_stage)
Juan Quintela56e93d22015-05-07 19:33:31 +02001533{
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001534 PageSearchStatus pss;
Juan Quintela56e93d22015-05-07 19:33:31 +02001535 int pages = 0;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001536 bool again, found;
Juan Quintela56e93d22015-05-07 19:33:31 +02001537
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05301538 /* No dirty page as there is zero RAM */
1539 if (!ram_bytes_total()) {
1540 return pages;
1541 }
1542
Juan Quintela6f37bb82017-03-13 19:26:29 +01001543 pss.block = rs->last_seen_block;
Juan Quintelaa935e302017-03-21 15:36:51 +01001544 pss.page = rs->last_page;
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001545 pss.complete_round = false;
1546
1547 if (!pss.block) {
1548 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1549 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001550
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001551 do {
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001552 again = true;
Juan Quintelaf20e2862017-03-21 16:19:05 +01001553 found = get_queued_page(rs, &pss);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001554
1555 if (!found) {
1556 /* priority queue empty, so just search for something dirty */
Juan Quintelaf20e2862017-03-21 16:19:05 +01001557 found = find_dirty_block(rs, &pss, &again);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001558 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001559
1560 if (found) {
Juan Quintelaf20e2862017-03-21 16:19:05 +01001561 pages = ram_save_host_page(rs, &pss, last_stage);
Juan Quintela56e93d22015-05-07 19:33:31 +02001562 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001563 } while (!pages && again);
Juan Quintela56e93d22015-05-07 19:33:31 +02001564
Juan Quintela6f37bb82017-03-13 19:26:29 +01001565 rs->last_seen_block = pss.block;
Juan Quintelaa935e302017-03-21 15:36:51 +01001566 rs->last_page = pss.page;
Juan Quintela56e93d22015-05-07 19:33:31 +02001567
1568 return pages;
1569}
1570
1571void acct_update_position(QEMUFile *f, size_t size, bool zero)
1572{
1573 uint64_t pages = size / TARGET_PAGE_SIZE;
Juan Quintelaf7ccd612017-03-13 20:30:21 +01001574
Juan Quintela56e93d22015-05-07 19:33:31 +02001575 if (zero) {
Juan Quintela93604472017-06-06 19:49:03 +02001576 ram_counters.duplicate += pages;
Juan Quintela56e93d22015-05-07 19:33:31 +02001577 } else {
Juan Quintela93604472017-06-06 19:49:03 +02001578 ram_counters.normal += pages;
1579 ram_counters.transferred += size;
Juan Quintela56e93d22015-05-07 19:33:31 +02001580 qemu_update_position(f, size);
1581 }
1582}
1583
Juan Quintela56e93d22015-05-07 19:33:31 +02001584uint64_t ram_bytes_total(void)
1585{
1586 RAMBlock *block;
1587 uint64_t total = 0;
1588
1589 rcu_read_lock();
Peter Xu99e15582017-05-12 12:17:39 +08001590 RAMBLOCK_FOREACH(block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001591 total += block->used_length;
Peter Xu99e15582017-05-12 12:17:39 +08001592 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001593 rcu_read_unlock();
1594 return total;
1595}
1596
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001597static void xbzrle_load_setup(void)
Juan Quintela56e93d22015-05-07 19:33:31 +02001598{
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001599 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
Juan Quintela56e93d22015-05-07 19:33:31 +02001600}
1601
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001602static void xbzrle_load_cleanup(void)
1603{
1604 g_free(XBZRLE.decoded_buf);
1605 XBZRLE.decoded_buf = NULL;
1606}
1607
Peter Xu7d7c96b2017-10-19 14:31:58 +08001608static void ram_state_cleanup(RAMState **rsp)
1609{
1610 migration_page_queue_free(*rsp);
1611 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
1612 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
1613 g_free(*rsp);
1614 *rsp = NULL;
1615}
1616
Peter Xu84593a02017-10-19 14:31:59 +08001617static void xbzrle_cleanup(void)
1618{
1619 XBZRLE_cache_lock();
1620 if (XBZRLE.cache) {
1621 cache_fini(XBZRLE.cache);
1622 g_free(XBZRLE.encoded_buf);
1623 g_free(XBZRLE.current_buf);
1624 g_free(XBZRLE.zero_target_page);
1625 XBZRLE.cache = NULL;
1626 XBZRLE.encoded_buf = NULL;
1627 XBZRLE.current_buf = NULL;
1628 XBZRLE.zero_target_page = NULL;
1629 }
1630 XBZRLE_cache_unlock();
1631}
1632
Juan Quintelaf265e0e2017-06-28 11:52:27 +02001633static void ram_save_cleanup(void *opaque)
Juan Quintela56e93d22015-05-07 19:33:31 +02001634{
Juan Quintela53518d92017-05-04 11:46:24 +02001635 RAMState **rsp = opaque;
Juan Quintela6b6712e2017-03-22 15:18:04 +01001636 RAMBlock *block;
Juan Quintelaeb859c52017-03-13 21:51:55 +01001637
Li Zhijian2ff64032015-07-02 20:18:05 +08001638 /* The caller must hold the iothread lock or be in a bh, so there is
 1639 * no writing race against the migration bitmap
1640 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001641 memory_global_dirty_log_stop();
1642
1643 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1644 g_free(block->bmap);
1645 block->bmap = NULL;
1646 g_free(block->unsentmap);
1647 block->unsentmap = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02001648 }
1649
Peter Xu84593a02017-10-19 14:31:59 +08001650 xbzrle_cleanup();
Juan Quintelaf0afa332017-06-28 11:52:28 +02001651 compress_threads_save_cleanup();
Peter Xu7d7c96b2017-10-19 14:31:58 +08001652 ram_state_cleanup(rsp);
Juan Quintela56e93d22015-05-07 19:33:31 +02001653}
1654
Juan Quintela6f37bb82017-03-13 19:26:29 +01001655static void ram_state_reset(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02001656{
Juan Quintela6f37bb82017-03-13 19:26:29 +01001657 rs->last_seen_block = NULL;
1658 rs->last_sent_block = NULL;
Juan Quintela269ace22017-03-21 15:23:31 +01001659 rs->last_page = 0;
Juan Quintela6f37bb82017-03-13 19:26:29 +01001660 rs->last_version = ram_list.version;
1661 rs->ram_bulk_stage = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02001662}
1663
1664#define MAX_WAIT 50 /* ms, half buffered_file limit */
1665
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001666/*
1667 * 'expected' is the value you expect the bitmap mostly to be full
1668 * of; it won't bother printing lines that are all this value.
1670 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001671void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
1672 unsigned long pages)
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001673{
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001674 int64_t cur;
1675 int64_t linelen = 128;
1676 char linebuf[129];
1677
Juan Quintela6b6712e2017-03-22 15:18:04 +01001678 for (cur = 0; cur < pages; cur += linelen) {
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001679 int64_t curb;
1680 bool found = false;
1681 /*
1682 * Last line; catch the case where the line length
1683 * is longer than remaining ram
1684 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001685 if (cur + linelen > pages) {
1686 linelen = pages - cur;
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001687 }
1688 for (curb = 0; curb < linelen; curb++) {
1689 bool thisbit = test_bit(cur + curb, todump);
1690 linebuf[curb] = thisbit ? '1' : '.';
1691 found = found || (thisbit != expected);
1692 }
1693 if (found) {
1694 linebuf[curb] = '\0';
1695 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
1696 }
1697 }
1698}
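/*
 * Example output (illustrative): a dump line looks like
 *
 *     0x00000080 : ..11....1.......
 *
 * i.e. the page index at the start of the 128-page line, then one
 * character per page; only lines containing at least one bit that
 * differs from 'expected' are printed.
 */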
1699
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001700/* **** functions for postcopy ***** */
1701
Pavel Butsykinced1c612017-02-03 18:23:21 +03001702void ram_postcopy_migrated_memory_release(MigrationState *ms)
1703{
1704 struct RAMBlock *block;
Pavel Butsykinced1c612017-02-03 18:23:21 +03001705
Peter Xu99e15582017-05-12 12:17:39 +08001706 RAMBLOCK_FOREACH(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01001707 unsigned long *bitmap = block->bmap;
1708 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
1709 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
Pavel Butsykinced1c612017-02-03 18:23:21 +03001710
1711 while (run_start < range) {
1712 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
Juan Quintelaaaa20642017-03-21 11:35:24 +01001713 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
Pavel Butsykinced1c612017-02-03 18:23:21 +03001714 (run_end - run_start) << TARGET_PAGE_BITS);
1715 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
1716 }
1717 }
1718}
1719
Juan Quintela3d0684b2017-03-23 15:06:39 +01001720/**
1721 * postcopy_send_discard_bm_ram: discard a RAMBlock
1722 *
1723 * Returns zero on success
1724 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001725 * Callback from postcopy_each_ram_send_discard for each RAMBlock
1726 * Note: At this point the 'unsentmap' is the processed bitmap combined
1727 * with the dirtymap; so a '1' means it's either dirty or unsent.
Juan Quintela3d0684b2017-03-23 15:06:39 +01001728 *
1729 * @ms: current migration state
1730 * @pds: state for postcopy
 1731 * @block: RAMBlock to discard
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001733 */
1734static int postcopy_send_discard_bm_ram(MigrationState *ms,
1735 PostcopyDiscardState *pds,
Juan Quintela6b6712e2017-03-22 15:18:04 +01001736 RAMBlock *block)
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001737{
Juan Quintela6b6712e2017-03-22 15:18:04 +01001738 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001739 unsigned long current;
Juan Quintela6b6712e2017-03-22 15:18:04 +01001740 unsigned long *unsentmap = block->unsentmap;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001741
Juan Quintela6b6712e2017-03-22 15:18:04 +01001742 for (current = 0; current < end; ) {
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001743 unsigned long one = find_next_bit(unsentmap, end, current);
1744
1745 if (one <= end) {
1746 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1747 unsigned long discard_length;
1748
1749 if (zero >= end) {
1750 discard_length = end - one;
1751 } else {
1752 discard_length = zero - one;
1753 }
Dr. David Alan Gilbertd688c622016-06-13 12:16:40 +01001754 if (discard_length) {
1755 postcopy_discard_send_range(ms, pds, one, discard_length);
1756 }
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001757 current = one + discard_length;
1758 } else {
1759 current = one;
1760 }
1761 }
1762
1763 return 0;
1764}
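/*
 * Worked example (illustrative): if unsentmap for a tiny block were
 * 0b00111100, the scan finds one == 2 and zero == 6, sends a single
 * discard covering pages 2..5 (length 4), and resumes at page 6.
 */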
1765
Juan Quintela3d0684b2017-03-23 15:06:39 +01001766/**
1767 * postcopy_each_ram_send_discard: discard all RAMBlocks
1768 *
1769 * Returns 0 for success or negative for error
1770 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001771 * Utility for the outgoing postcopy code.
1772 * Calls postcopy_send_discard_bm_ram for each RAMBlock
1773 * passing it bitmap indexes and name.
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001774 * (qemu_ram_foreach_block ends up passing unscaled lengths
1775 * which would mean postcopy code would have to deal with target page)
Juan Quintela3d0684b2017-03-23 15:06:39 +01001776 *
1777 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001778 */
1779static int postcopy_each_ram_send_discard(MigrationState *ms)
1780{
1781 struct RAMBlock *block;
1782 int ret;
1783
Peter Xu99e15582017-05-12 12:17:39 +08001784 RAMBLOCK_FOREACH(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01001785 PostcopyDiscardState *pds =
1786 postcopy_discard_send_init(ms, block->idstr);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001787
1788 /*
1789 * Postcopy sends chunks of bitmap over the wire, but it
 1790 * just needs indexes at this point, which avoids it having
 1791 * target-page-specific code.
1792 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001793 ret = postcopy_send_discard_bm_ram(ms, pds, block);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001794 postcopy_discard_send_finish(ms, pds);
1795 if (ret) {
1796 return ret;
1797 }
1798 }
1799
1800 return 0;
1801}
1802
Juan Quintela3d0684b2017-03-23 15:06:39 +01001803/**
 1804 * postcopy_chunk_hostpages_pass: canonicalize bitmap in host pages
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001805 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001806 * Helper for postcopy_chunk_hostpages; it's called twice to
 1807 * canonicalize the two bitmaps, which are similar but one is
1808 * inverted.
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001809 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001810 * Postcopy requires that all target pages in a hostpage are dirty or
1811 * clean, not a mix. This function canonicalizes the bitmaps.
1812 *
1813 * @ms: current migration state
1814 * @unsent_pass: if true we need to canonicalize partially unsent host pages
1815 * otherwise we need to canonicalize partially dirty host pages
1816 * @block: block that contains the page we want to canonicalize
1817 * @pds: state for postcopy
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001818 */
1819static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1820 RAMBlock *block,
1821 PostcopyDiscardState *pds)
1822{
Juan Quintela53518d92017-05-04 11:46:24 +02001823 RAMState *rs = ram_state;
Juan Quintela6b6712e2017-03-22 15:18:04 +01001824 unsigned long *bitmap = block->bmap;
1825 unsigned long *unsentmap = block->unsentmap;
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001826 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
Juan Quintela6b6712e2017-03-22 15:18:04 +01001827 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001828 unsigned long run_start;
1829
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001830 if (block->page_size == TARGET_PAGE_SIZE) {
1831 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
1832 return;
1833 }
1834
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001835 if (unsent_pass) {
1836 /* Find a sent page */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001837 run_start = find_next_zero_bit(unsentmap, pages, 0);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001838 } else {
1839 /* Find a dirty page */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001840 run_start = find_next_bit(bitmap, pages, 0);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001841 }
1842
Juan Quintela6b6712e2017-03-22 15:18:04 +01001843 while (run_start < pages) {
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001844 bool do_fixup = false;
1845 unsigned long fixup_start_addr;
1846 unsigned long host_offset;
1847
1848 /*
1849 * If the start of this run of pages is in the middle of a host
1850 * page, then we need to fixup this host page.
1851 */
1852 host_offset = run_start % host_ratio;
1853 if (host_offset) {
1854 do_fixup = true;
1855 run_start -= host_offset;
1856 fixup_start_addr = run_start;
1857 /* For the next pass */
1858 run_start = run_start + host_ratio;
1859 } else {
1860 /* Find the end of this run */
1861 unsigned long run_end;
1862 if (unsent_pass) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01001863 run_end = find_next_bit(unsentmap, pages, run_start + 1);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001864 } else {
Juan Quintela6b6712e2017-03-22 15:18:04 +01001865 run_end = find_next_zero_bit(bitmap, pages, run_start + 1);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001866 }
1867 /*
1868 * If the end isn't at the start of a host page, then the
1869 * run doesn't finish at the end of a host page
1870 * and we need to discard.
1871 */
1872 host_offset = run_end % host_ratio;
1873 if (host_offset) {
1874 do_fixup = true;
1875 fixup_start_addr = run_end - host_offset;
1876 /*
1877 * This host page has gone, the next loop iteration starts
1878 * from after the fixup
1879 */
1880 run_start = fixup_start_addr + host_ratio;
1881 } else {
1882 /*
1883 * No discards on this iteration, next loop starts from
1884 * next sent/dirty page
1885 */
1886 run_start = run_end + 1;
1887 }
1888 }
1889
1890 if (do_fixup) {
1891 unsigned long page;
1892
1893 /* Tell the destination to discard this page */
1894 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1895 /* For the unsent_pass we:
1896 * discard partially sent pages
1897 * For the !unsent_pass (dirty) we:
1898 * discard partially dirty pages that were sent
1899 * (any partially sent pages were already discarded
1900 * by the previous unsent_pass)
1901 */
1902 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1903 host_ratio);
1904 }
1905
1906 /* Clean up the bitmap */
1907 for (page = fixup_start_addr;
1908 page < fixup_start_addr + host_ratio; page++) {
1909 /* All pages in this host page are now not sent */
1910 set_bit(page, unsentmap);
1911
1912 /*
1913 * Remark them as dirty, updating the count for any pages
1914 * that weren't previously dirty.
1915 */
Juan Quintela0d8ec882017-03-13 21:21:41 +01001916 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001917 }
1918 }
1919
1920 if (unsent_pass) {
1921 /* Find the next sent page for the next iteration */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001922 run_start = find_next_zero_bit(unsentmap, pages, run_start);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001923 } else {
1924 /* Find the next dirty page for the next iteration */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001925 run_start = find_next_bit(bitmap, pages, run_start);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001926 }
1927 }
1928}
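/*
 * Worked example (illustrative): with host_ratio == 512 (2MB host pages,
 * 4KB target pages), a run starting at page 1000 begins mid host page
 * (1000 % 512 == 488), so the fixup rewinds to page 512, discards pages
 * 512..1023 as one host page and remarks them all unsent and dirty,
 * keeping every host page all-or-nothing.
 */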
1929
Juan Quintela3d0684b2017-03-23 15:06:39 +01001930/**
 1931 * postcopy_chunk_hostpages: discard any partially sent host page
1932 *
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001933 * Utility for the outgoing postcopy code.
1934 *
1935 * Discard any partially sent host-page size chunks, mark any partially
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001936 * dirty host-page size chunks as all dirty. In this case the host-page
1937 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001938 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001939 * Returns zero on success
1940 *
1941 * @ms: current migration state
Juan Quintela6b6712e2017-03-22 15:18:04 +01001942 * @block: block we want to work with
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001943 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01001944static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001945{
Juan Quintela6b6712e2017-03-22 15:18:04 +01001946 PostcopyDiscardState *pds =
1947 postcopy_discard_send_init(ms, block->idstr);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001948
Juan Quintela6b6712e2017-03-22 15:18:04 +01001949 /* First pass: Discard all partially sent host pages */
1950 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1951 /*
1952 * Second pass: Ensure that all partially dirty host pages are made
1953 * fully dirty.
1954 */
1955 postcopy_chunk_hostpages_pass(ms, false, block, pds);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001956
Juan Quintela6b6712e2017-03-22 15:18:04 +01001957 postcopy_discard_send_finish(ms, pds);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001958 return 0;
1959}
1960
Juan Quintela3d0684b2017-03-23 15:06:39 +01001961/**
1962 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
1963 *
1964 * Returns zero on success
1965 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001966 * Transmit the set of pages to be discarded after precopy to the target
1967 * these are pages that:
1968 * a) Have been previously transmitted but are now dirty again
1969 * b) Pages that have never been transmitted, this ensures that
1970 * any pages on the destination that have been mapped by background
1971 * tasks get discarded (transparent huge pages is the specific concern)
1972 * Hopefully this is pretty sparse
Juan Quintela3d0684b2017-03-23 15:06:39 +01001973 *
1974 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001975 */
1976int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1977{
Juan Quintela53518d92017-05-04 11:46:24 +02001978 RAMState *rs = ram_state;
Juan Quintela6b6712e2017-03-22 15:18:04 +01001979 RAMBlock *block;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001980 int ret;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001981
1982 rcu_read_lock();
1983
1984 /* This should be our last sync, the src is now paused */
Juan Quintelaeb859c52017-03-13 21:51:55 +01001985 migration_bitmap_sync(rs);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001986
Juan Quintela6b6712e2017-03-22 15:18:04 +01001987 /* Easiest way to make sure we don't resume in the middle of a host-page */
1988 rs->last_seen_block = NULL;
1989 rs->last_sent_block = NULL;
1990 rs->last_page = 0;
1991
1992 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1993 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
1994 unsigned long *bitmap = block->bmap;
1995 unsigned long *unsentmap = block->unsentmap;
1996
1997 if (!unsentmap) {
1998 /* We don't have a safe way to resize the sentmap, so
1999 * if the bitmap was resized it will be NULL at this
2000 * point.
2001 */
2002 error_report("migration ram resized during precopy phase");
2003 rcu_read_unlock();
2004 return -EINVAL;
2005 }
2006 /* Deal with TPS != HPS and huge pages */
2007 ret = postcopy_chunk_hostpages(ms, block);
2008 if (ret) {
2009 rcu_read_unlock();
2010 return ret;
2011 }
2012
2013 /*
2014 * Update the unsentmap to be unsentmap = unsentmap | dirty
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002015 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002016 bitmap_or(unsentmap, unsentmap, bitmap, pages);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002017#ifdef DEBUG_POSTCOPY
Juan Quintela6b6712e2017-03-22 15:18:04 +01002018 ram_debug_dump_bitmap(unsentmap, true, pages);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002019#endif
Juan Quintela6b6712e2017-03-22 15:18:04 +01002020 }
2021 trace_ram_postcopy_send_discard_bitmap();
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002022
2023 ret = postcopy_each_ram_send_discard(ms);
2024 rcu_read_unlock();
2025
2026 return ret;
2027}
2028
Juan Quintela3d0684b2017-03-23 15:06:39 +01002029/**
2030 * ram_discard_range: discard dirtied pages at the beginning of postcopy
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002031 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002032 * Returns zero on success
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002033 *
Juan Quintela36449152017-03-23 15:11:59 +01002034 * @rbname: name of the RAMBlock of the request
Juan Quintela3d0684b2017-03-23 15:06:39 +01002036 * @start: byte offset within the RAMBlock
 2037 * @length: length in bytes to discard
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002038 */
Juan Quintelaaaa20642017-03-21 11:35:24 +01002039int ram_discard_range(const char *rbname, uint64_t start, size_t length)
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002040{
2041 int ret = -1;
2042
Juan Quintela36449152017-03-23 15:11:59 +01002043 trace_ram_discard_range(rbname, start, length);
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00002044
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002045 rcu_read_lock();
Juan Quintela36449152017-03-23 15:11:59 +01002046 RAMBlock *rb = qemu_ram_block_by_name(rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002047
2048 if (!rb) {
Juan Quintela36449152017-03-23 15:11:59 +01002049 error_report("ram_discard_range: Failed to find block '%s'", rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002050 goto err;
2051 }
2052
Alexey Perevalovf9494612017-10-05 14:13:20 +03002053 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
2054 length >> qemu_target_page_bits());
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00002055 ret = ram_block_discard_range(rb, start, length);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002056
2057err:
2058 rcu_read_unlock();
2059
2060 return ret;
2061}
2062
Peter Xu84593a02017-10-19 14:31:59 +08002063/*
2064 * For every allocation, we will try not to crash the VM if the
2065 * allocation failed.
2066 */
2067static int xbzrle_init(void)
2068{
2069 Error *local_err = NULL;
2070
2071 if (!migrate_use_xbzrle()) {
2072 return 0;
2073 }
2074
2075 XBZRLE_cache_lock();
2076
2077 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
2078 if (!XBZRLE.zero_target_page) {
2079 error_report("%s: Error allocating zero page", __func__);
2080 goto err_out;
2081 }
2082
2083 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
2084 TARGET_PAGE_SIZE, &local_err);
2085 if (!XBZRLE.cache) {
2086 error_report_err(local_err);
2087 goto free_zero_page;
2088 }
2089
2090 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2091 if (!XBZRLE.encoded_buf) {
2092 error_report("%s: Error allocating encoded_buf", __func__);
2093 goto free_cache;
2094 }
2095
2096 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2097 if (!XBZRLE.current_buf) {
2098 error_report("%s: Error allocating current_buf", __func__);
2099 goto free_encoded_buf;
2100 }
2101
2102 /* We are all good */
2103 XBZRLE_cache_unlock();
2104 return 0;
2105
2106free_encoded_buf:
2107 g_free(XBZRLE.encoded_buf);
2108 XBZRLE.encoded_buf = NULL;
2109free_cache:
2110 cache_fini(XBZRLE.cache);
2111 XBZRLE.cache = NULL;
2112free_zero_page:
2113 g_free(XBZRLE.zero_target_page);
2114 XBZRLE.zero_target_page = NULL;
2115err_out:
2116 XBZRLE_cache_unlock();
2117 return -ENOMEM;
2118}
2119
Juan Quintela53518d92017-05-04 11:46:24 +02002120static int ram_state_init(RAMState **rsp)
Juan Quintela56e93d22015-05-07 19:33:31 +02002121{
Peter Xu7d00ee62017-10-19 14:31:57 +08002122 *rsp = g_try_new0(RAMState, 1);
2123
2124 if (!*rsp) {
2125 error_report("%s: Init ramstate fail", __func__);
2126 return -1;
2127 }
Juan Quintela53518d92017-05-04 11:46:24 +02002128
2129 qemu_mutex_init(&(*rsp)->bitmap_mutex);
2130 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
2131 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
Juan Quintela56e93d22015-05-07 19:33:31 +02002132
Peter Xu7d00ee62017-10-19 14:31:57 +08002133 /*
2134 * Count the total number of pages used by ram blocks not including any
2135 * gaps due to alignment or unplugs.
2136 */
2137 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
2138
2139 ram_state_reset(*rsp);
2140
2141 return 0;
2142}
2143
Peter Xud6eff5d2017-10-19 14:32:00 +08002144static void ram_list_init_bitmaps(void)
2145{
2146 RAMBlock *block;
2147 unsigned long pages;
2148
2149 /* Skip setting bitmap if there is no RAM */
2150 if (ram_bytes_total()) {
2151 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2152 pages = block->max_length >> TARGET_PAGE_BITS;
2153 block->bmap = bitmap_new(pages);
2154 bitmap_set(block->bmap, 0, pages);
2155 if (migrate_postcopy_ram()) {
2156 block->unsentmap = bitmap_new(pages);
2157 bitmap_set(block->unsentmap, 0, pages);
2158 }
2159 }
2160 }
2161}
2162
2163static void ram_init_bitmaps(RAMState *rs)
2164{
2165 /* For memory_global_dirty_log_start below. */
2166 qemu_mutex_lock_iothread();
2167 qemu_mutex_lock_ramlist();
2168 rcu_read_lock();
2169
2170 ram_list_init_bitmaps();
2171 memory_global_dirty_log_start();
2172 migration_bitmap_sync(rs);
2173
2174 rcu_read_unlock();
2175 qemu_mutex_unlock_ramlist();
2176 qemu_mutex_unlock_iothread();
2177}
2178
Peter Xu7d00ee62017-10-19 14:31:57 +08002179static int ram_init_all(RAMState **rsp)
2180{
Peter Xu7d00ee62017-10-19 14:31:57 +08002181 if (ram_state_init(rsp)) {
2182 return -1;
2183 }
2184
Peter Xu84593a02017-10-19 14:31:59 +08002185 if (xbzrle_init()) {
2186 ram_state_cleanup(rsp);
2187 return -1;
Juan Quintela56e93d22015-05-07 19:33:31 +02002188 }
2189
Peter Xud6eff5d2017-10-19 14:32:00 +08002190 ram_init_bitmaps(*rsp);
zhanghailianga91246c2016-10-27 14:42:59 +08002191
2192 return 0;
2193}
2194
Juan Quintela3d0684b2017-03-23 15:06:39 +01002195/*
2196 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
zhanghailianga91246c2016-10-27 14:42:59 +08002197 * a long-running RCU critical section. When RCU reclaims in the code
2198 * start to become numerous it will be necessary to reduce the
2199 * granularity of these critical sections.
2200 */
2201
Juan Quintela3d0684b2017-03-23 15:06:39 +01002202/**
2203 * ram_save_setup: Setup RAM for migration
2204 *
2205 * Returns zero to indicate success and negative for error
2206 *
2207 * @f: QEMUFile where to send the data
2208 * @opaque: RAMState pointer
2209 */
zhanghailianga91246c2016-10-27 14:42:59 +08002210static int ram_save_setup(QEMUFile *f, void *opaque)
2211{
Juan Quintela53518d92017-05-04 11:46:24 +02002212 RAMState **rsp = opaque;
zhanghailianga91246c2016-10-27 14:42:59 +08002213 RAMBlock *block;
2214
2215 /* migration has already setup the bitmap, reuse it. */
2216 if (!migration_in_colo_state()) {
Peter Xu7d00ee62017-10-19 14:31:57 +08002217 if (ram_init_all(rsp) != 0) {
zhanghailianga91246c2016-10-27 14:42:59 +08002218 return -1;
Juan Quintela53518d92017-05-04 11:46:24 +02002219 }
zhanghailianga91246c2016-10-27 14:42:59 +08002220 }
Juan Quintela53518d92017-05-04 11:46:24 +02002221 (*rsp)->f = f;
zhanghailianga91246c2016-10-27 14:42:59 +08002222
2223 rcu_read_lock();
Juan Quintela56e93d22015-05-07 19:33:31 +02002224
2225 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
2226
Peter Xu99e15582017-05-12 12:17:39 +08002227 RAMBLOCK_FOREACH(block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002228 qemu_put_byte(f, strlen(block->idstr));
2229 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2230 qemu_put_be64(f, block->used_length);
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00002231 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
2232 qemu_put_be64(f, block->page_size);
2233 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002234 }
2235
2236 rcu_read_unlock();
Juan Quintelaf0afa332017-06-28 11:52:28 +02002237 compress_threads_save_setup();
Juan Quintela56e93d22015-05-07 19:33:31 +02002238
2239 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2240 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2241
2242 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2243
2244 return 0;
2245}
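/*
 * Stream layout emitted above, summarized for clarity: a be64 of
 * ram_bytes_total() with RAM_SAVE_FLAG_MEM_SIZE set, then for each
 * RAMBlock a one-byte idstr length, the idstr bytes, a be64 used_length
 * and (for postcopy with non-host-sized pages) a be64 page_size,
 * terminated by RAM_SAVE_FLAG_EOS.
 */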
2246
Juan Quintela3d0684b2017-03-23 15:06:39 +01002247/**
2248 * ram_save_iterate: iterative stage for migration
2249 *
2250 * Returns zero to indicate success and negative for error
2251 *
2252 * @f: QEMUFile where to send the data
2253 * @opaque: RAMState pointer
2254 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002255static int ram_save_iterate(QEMUFile *f, void *opaque)
2256{
Juan Quintela53518d92017-05-04 11:46:24 +02002257 RAMState **temp = opaque;
2258 RAMState *rs = *temp;
Juan Quintela56e93d22015-05-07 19:33:31 +02002259 int ret;
2260 int i;
2261 int64_t t0;
Thomas Huth5c903082016-11-04 14:10:17 +01002262 int done = 0;
Juan Quintela56e93d22015-05-07 19:33:31 +02002263
2264 rcu_read_lock();
Juan Quintela6f37bb82017-03-13 19:26:29 +01002265 if (ram_list.version != rs->last_version) {
2266 ram_state_reset(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002267 }
2268
2269 /* Read version before ram_list.blocks */
2270 smp_rmb();
2271
2272 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2273
2274 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2275 i = 0;
2276 while ((ret = qemu_file_rate_limit(f)) == 0) {
2277 int pages;
2278
Juan Quintelace25d332017-03-15 11:00:51 +01002279 pages = ram_find_and_save_block(rs, false);
Juan Quintela56e93d22015-05-07 19:33:31 +02002280 /* no more pages to send */
2281 if (pages == 0) {
Thomas Huth5c903082016-11-04 14:10:17 +01002282 done = 1;
Juan Quintela56e93d22015-05-07 19:33:31 +02002283 break;
2284 }
Juan Quintela23b28c32017-03-13 20:51:34 +01002285 rs->iterations++;
Jason J. Herne070afca2015-09-08 13:12:35 -04002286
Juan Quintela56e93d22015-05-07 19:33:31 +02002287 /* we want to check in the 1st loop, just in case it was the 1st time
2288 and we had to sync the dirty bitmap.
 2289 qemu_get_clock_ns() is a bit expensive, so we only check once
 2290 every few iterations
2291 */
2292 if ((i & 63) == 0) {
2293 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2294 if (t1 > MAX_WAIT) {
Juan Quintela55c44462017-01-23 22:32:05 +01002295 trace_ram_save_iterate_big_wait(t1, i);
Juan Quintela56e93d22015-05-07 19:33:31 +02002296 break;
2297 }
2298 }
2299 i++;
2300 }
Juan Quintelace25d332017-03-15 11:00:51 +01002301 flush_compressed_data(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002302 rcu_read_unlock();
2303
2304 /*
2305 * Must occur before EOS (or any QEMUFile operation)
2306 * because of RDMA protocol.
2307 */
2308 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2309
2310 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
Juan Quintela93604472017-06-06 19:49:03 +02002311 ram_counters.transferred += 8;
Juan Quintela56e93d22015-05-07 19:33:31 +02002312
2313 ret = qemu_file_get_error(f);
2314 if (ret < 0) {
2315 return ret;
2316 }
2317
Thomas Huth5c903082016-11-04 14:10:17 +01002318 return done;
Juan Quintela56e93d22015-05-07 19:33:31 +02002319}
2320
Juan Quintela3d0684b2017-03-23 15:06:39 +01002321/**
2322 * ram_save_complete: function called to send the remaining amount of ram
2323 *
2324 * Returns zero to indicate success
2325 *
2326 * Called with iothread lock
2327 *
2328 * @f: QEMUFile where to send the data
2329 * @opaque: RAMState pointer
2330 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002331static int ram_save_complete(QEMUFile *f, void *opaque)
2332{
Juan Quintela53518d92017-05-04 11:46:24 +02002333 RAMState **temp = opaque;
2334 RAMState *rs = *temp;
Juan Quintela6f37bb82017-03-13 19:26:29 +01002335
Juan Quintela56e93d22015-05-07 19:33:31 +02002336 rcu_read_lock();
2337
Juan Quintela57273092017-03-20 22:25:28 +01002338 if (!migration_in_postcopy()) {
Juan Quintela8d820d62017-03-13 19:35:50 +01002339 migration_bitmap_sync(rs);
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002340 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002341
2342 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2343
2344 /* try transferring iterative blocks of memory */
2345
2346 /* flush all remaining blocks regardless of rate limiting */
2347 while (true) {
2348 int pages;
2349
Juan Quintelace25d332017-03-15 11:00:51 +01002350 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
Juan Quintela56e93d22015-05-07 19:33:31 +02002351 /* no more blocks to send */
2352 if (pages == 0) {
2353 break;
2354 }
2355 }
2356
Juan Quintelace25d332017-03-15 11:00:51 +01002357 flush_compressed_data(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002358 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
Juan Quintela56e93d22015-05-07 19:33:31 +02002359
2360 rcu_read_unlock();
Paolo Bonzinid09a6fd2015-07-09 08:47:58 +02002361
Juan Quintela56e93d22015-05-07 19:33:31 +02002362 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2363
2364 return 0;
2365}
2366
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002367static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2368 uint64_t *non_postcopiable_pending,
2369 uint64_t *postcopiable_pending)
Juan Quintela56e93d22015-05-07 19:33:31 +02002370{
Juan Quintela53518d92017-05-04 11:46:24 +02002371 RAMState **temp = opaque;
2372 RAMState *rs = *temp;
Juan Quintela56e93d22015-05-07 19:33:31 +02002373 uint64_t remaining_size;
2374
Juan Quintela9edabd42017-03-14 12:02:16 +01002375 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02002376
Juan Quintela57273092017-03-20 22:25:28 +01002377 if (!migration_in_postcopy() &&
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002378 remaining_size < max_size) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002379 qemu_mutex_lock_iothread();
2380 rcu_read_lock();
Juan Quintela8d820d62017-03-13 19:35:50 +01002381 migration_bitmap_sync(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002382 rcu_read_unlock();
2383 qemu_mutex_unlock_iothread();
Juan Quintela9edabd42017-03-14 12:02:16 +01002384 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02002385 }
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002386
Vladimir Sementsov-Ogievskiy86e11672017-07-10 19:30:15 +03002387 if (migrate_postcopy_ram()) {
2388 /* We can do postcopy, and all the data is postcopiable */
2389 *postcopiable_pending += remaining_size;
2390 } else {
2391 *non_postcopiable_pending += remaining_size;
2392 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002393}
2394
2395static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2396{
2397 unsigned int xh_len;
2398 int xh_flags;
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002399 uint8_t *loaded_data;
Juan Quintela56e93d22015-05-07 19:33:31 +02002400
Juan Quintela56e93d22015-05-07 19:33:31 +02002401 /* extract RLE header */
2402 xh_flags = qemu_get_byte(f);
2403 xh_len = qemu_get_be16(f);
2404
2405 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2406 error_report("Failed to load XBZRLE page - wrong compression!");
2407 return -1;
2408 }
2409
2410 if (xh_len > TARGET_PAGE_SIZE) {
2411 error_report("Failed to load XBZRLE page - len overflow!");
2412 return -1;
2413 }
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002414 loaded_data = XBZRLE.decoded_buf;
Juan Quintela56e93d22015-05-07 19:33:31 +02002415 /* load data and decode */
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002416 /* it can change loaded_data to point to an internal buffer */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002417 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002418
2419 /* decode RLE */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002420 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
Juan Quintela56e93d22015-05-07 19:33:31 +02002421 TARGET_PAGE_SIZE) == -1) {
2422 error_report("Failed to load XBZRLE page - decode error!");
2423 return -1;
2424 }
2425
2426 return 0;
2427}
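/*
 * Wire format consumed above, summarized for clarity: one byte of
 * encoding flags (must be ENCODING_FLAG_XBZRLE), a be16 length of the
 * encoded data (at most TARGET_PAGE_SIZE), then the encoded bytes that
 * xbzrle_decode_buffer() applies on top of the stale copy in the page
 * at host.
 */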
2428
Juan Quintela3d0684b2017-03-23 15:06:39 +01002429/**
2430 * ram_block_from_stream: read a RAMBlock id from the migration stream
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002431 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002432 * Must be called from within a rcu critical section.
2433 *
2434 * Returns a pointer from within the RCU-protected ram_list.
2435 *
2436 * @f: QEMUFile where to read the data from
2437 * @flags: Page flags (mostly to see if it's a continuation of previous block)
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002438 */
Juan Quintela3d0684b2017-03-23 15:06:39 +01002439static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
Juan Quintela56e93d22015-05-07 19:33:31 +02002440{
2441 static RAMBlock *block = NULL;
2442 char id[256];
2443 uint8_t len;
2444
2445 if (flags & RAM_SAVE_FLAG_CONTINUE) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08002446 if (!block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002447 error_report("Ack, bad migration stream!");
2448 return NULL;
2449 }
zhanghailiang4c4bad42016-01-15 11:37:41 +08002450 return block;
Juan Quintela56e93d22015-05-07 19:33:31 +02002451 }
2452
2453 len = qemu_get_byte(f);
2454 qemu_get_buffer(f, (uint8_t *)id, len);
2455 id[len] = 0;
2456
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002457 block = qemu_ram_block_by_name(id);
zhanghailiang4c4bad42016-01-15 11:37:41 +08002458 if (!block) {
2459 error_report("Can't find block %s", id);
2460 return NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002461 }
2462
zhanghailiang4c4bad42016-01-15 11:37:41 +08002463 return block;
2464}
2465
2466static inline void *host_from_ram_block_offset(RAMBlock *block,
2467 ram_addr_t offset)
2468{
2469 if (!offset_in_ramblock(block, offset)) {
2470 return NULL;
2471 }
2472
2473 return block->host + offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02002474}
2475
Juan Quintela3d0684b2017-03-23 15:06:39 +01002476/**
2477 * ram_handle_compressed: handle the zero page case
2478 *
Juan Quintela56e93d22015-05-07 19:33:31 +02002479 * If a page (or a whole RDMA chunk) has been
2480 * determined to be zero, then zap it.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002481 *
2482 * @host: host address for the zero page
2483 * @ch: what the page is filled from. We only support zero
2484 * @size: size of the zero page
Juan Quintela56e93d22015-05-07 19:33:31 +02002485 */
2486void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2487{
2488 if (ch != 0 || !is_zero_range(host, size)) {
2489 memset(host, ch, size);
2490 }
2491}
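/*
 * Note added for clarity: the memset is skipped when ch == 0 and the page
 * already reads as zero, so never-touched pages stay unwritten and the
 * destination avoids allocating (and dirtying) memory for them.
 */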
2492
2493static void *do_data_decompress(void *opaque)
2494{
2495 DecompressParam *param = opaque;
2496 unsigned long pagesize;
Liang Li33d151f2016-05-05 15:32:58 +08002497 uint8_t *des;
2498 int len;
Juan Quintela56e93d22015-05-07 19:33:31 +02002499
Liang Li33d151f2016-05-05 15:32:58 +08002500 qemu_mutex_lock(&param->mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002501 while (!param->quit) {
Liang Li33d151f2016-05-05 15:32:58 +08002502 if (param->des) {
2503 des = param->des;
2504 len = param->len;
2505 param->des = 0;
2506 qemu_mutex_unlock(&param->mutex);
2507
Liang Li73a89122016-05-05 15:32:51 +08002508 pagesize = TARGET_PAGE_SIZE;
2509 /* uncompress() can fail in some cases, notably when the page
2510 * was dirtied while being compressed. That is not a problem:
2511 * the dirty page will be retransmitted, and a failed
2512 * uncompress() does not corrupt the data of other pages.
2513 */
Liang Li33d151f2016-05-05 15:32:58 +08002514 uncompress((Bytef *)des, &pagesize,
2515 (const Bytef *)param->compbuf, len);
Liang Li73a89122016-05-05 15:32:51 +08002516
Liang Li33d151f2016-05-05 15:32:58 +08002517 qemu_mutex_lock(&decomp_done_lock);
2518 param->done = true;
2519 qemu_cond_signal(&decomp_done_cond);
2520 qemu_mutex_unlock(&decomp_done_lock);
2521
2522 qemu_mutex_lock(&param->mutex);
2523 } else {
2524 qemu_cond_wait(&param->cond, &param->mutex);
2525 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002526 }
Liang Li33d151f2016-05-05 15:32:58 +08002527 qemu_mutex_unlock(&param->mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002528
2529 return NULL;
2530}
2531
Liang Li5533b2e2016-05-05 15:32:52 +08002532static void wait_for_decompress_done(void)
2533{
2534 int idx, thread_count;
2535
2536 if (!migrate_use_compression()) {
2537 return;
2538 }
2539
2540 thread_count = migrate_decompress_threads();
2541 qemu_mutex_lock(&decomp_done_lock);
2542 for (idx = 0; idx < thread_count; idx++) {
2543 while (!decomp_param[idx].done) {
2544 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2545 }
2546 }
2547 qemu_mutex_unlock(&decomp_done_lock);
2548}
2549
Juan Quintelaf0afa332017-06-28 11:52:28 +02002550static void compress_threads_load_setup(void)
Juan Quintela56e93d22015-05-07 19:33:31 +02002551{
2552 int i, thread_count;
2553
Juan Quintela3416ab52016-04-20 11:56:01 +02002554 if (!migrate_use_compression()) {
2555 return;
2556 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002557 thread_count = migrate_decompress_threads();
2558 decompress_threads = g_new0(QemuThread, thread_count);
2559 decomp_param = g_new0(DecompressParam, thread_count);
Liang Li73a89122016-05-05 15:32:51 +08002560 qemu_mutex_init(&decomp_done_lock);
2561 qemu_cond_init(&decomp_done_cond);
Juan Quintela56e93d22015-05-07 19:33:31 +02002562 for (i = 0; i < thread_count; i++) {
2563 qemu_mutex_init(&decomp_param[i].mutex);
2564 qemu_cond_init(&decomp_param[i].cond);
2565 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
Liang Li73a89122016-05-05 15:32:51 +08002566 decomp_param[i].done = true;
Liang Li90e56fb2016-05-05 15:32:56 +08002567 decomp_param[i].quit = false;
Juan Quintela56e93d22015-05-07 19:33:31 +02002568 qemu_thread_create(decompress_threads + i, "decompress",
2569 do_data_decompress, decomp_param + i,
2570 QEMU_THREAD_JOINABLE);
2571 }
2572}
2573
Juan Quintelaf0afa332017-06-28 11:52:28 +02002574static void compress_threads_load_cleanup(void)
Juan Quintela56e93d22015-05-07 19:33:31 +02002575{
2576 int i, thread_count;
2577
Juan Quintela3416ab52016-04-20 11:56:01 +02002578 if (!migrate_use_compression()) {
2579 return;
2580 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002581 thread_count = migrate_decompress_threads();
2582 for (i = 0; i < thread_count; i++) {
2583 qemu_mutex_lock(&decomp_param[i].mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002584 decomp_param[i].quit = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02002585 qemu_cond_signal(&decomp_param[i].cond);
2586 qemu_mutex_unlock(&decomp_param[i].mutex);
2587 }
2588 for (i = 0; i < thread_count; i++) {
2589 qemu_thread_join(decompress_threads + i);
2590 qemu_mutex_destroy(&decomp_param[i].mutex);
2591 qemu_cond_destroy(&decomp_param[i].cond);
2592 g_free(decomp_param[i].compbuf);
2593 }
2594 g_free(decompress_threads);
2595 g_free(decomp_param);
Juan Quintela56e93d22015-05-07 19:33:31 +02002596 decompress_threads = NULL;
2597 decomp_param = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002598}
2599
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00002600static void decompress_data_with_multi_threads(QEMUFile *f,
Juan Quintela56e93d22015-05-07 19:33:31 +02002601 void *host, int len)
2602{
2603 int idx, thread_count;
2604
2605 thread_count = migrate_decompress_threads();
Liang Li73a89122016-05-05 15:32:51 +08002606 qemu_mutex_lock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002607 while (true) {
2608 for (idx = 0; idx < thread_count; idx++) {
Liang Li73a89122016-05-05 15:32:51 +08002609 if (decomp_param[idx].done) {
Liang Li33d151f2016-05-05 15:32:58 +08002610 decomp_param[idx].done = false;
2611 qemu_mutex_lock(&decomp_param[idx].mutex);
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00002612 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002613 decomp_param[idx].des = host;
2614 decomp_param[idx].len = len;
Liang Li33d151f2016-05-05 15:32:58 +08002615 qemu_cond_signal(&decomp_param[idx].cond);
2616 qemu_mutex_unlock(&decomp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002617 break;
2618 }
2619 }
2620 if (idx < thread_count) {
2621 break;
Liang Li73a89122016-05-05 15:32:51 +08002622 } else {
2623 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002624 }
2625 }
Liang Li73a89122016-05-05 15:32:51 +08002626 qemu_mutex_unlock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002627}
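/*
 * A sketch of the request/completion handshake between the load thread
 * (decompress_data_with_multi_threads) and one worker
 * (do_data_decompress), reconstructed from the code above:
 *
 *   load thread                         worker
 *   -----------                         ------
 *   wait until param->done              sleep on param->cond
 *   param->done = false
 *   fill compbuf, set des and len
 *   signal param->cond           --->   wake up, grab des/len,
 *                                       clear param->des,
 *                                       uncompress() into guest RAM
 *   wait on decomp_done_cond     <---   param->done = true,
 *                                       signal decomp_done_cond
 *
 * Each param->mutex protects one worker's request slot, while
 * decomp_done_lock/decomp_done_cond serialize the "done" transitions.
 */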
2628
Juan Quintela3d0684b2017-03-23 15:06:39 +01002629/**
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002630 * ram_load_setup: set up RAM for the incoming side of migration
2631 *
2632 * Returns zero to indicate success and negative for error
2633 *
2634 * @f: QEMUFile where to receive the data
2635 * @opaque: RAMState pointer
2636 */
2637static int ram_load_setup(QEMUFile *f, void *opaque)
2638{
2639 xbzrle_load_setup();
Juan Quintelaf0afa332017-06-28 11:52:28 +02002640 compress_threads_load_setup();
Alexey Perevalovf9494612017-10-05 14:13:20 +03002641 ramblock_recv_map_init();
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002642 return 0;
2643}
2644
2645static int ram_load_cleanup(void *opaque)
2646{
Alexey Perevalovf9494612017-10-05 14:13:20 +03002647 RAMBlock *rb;
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002648 xbzrle_load_cleanup();
Juan Quintelaf0afa332017-06-28 11:52:28 +02002649 compress_threads_load_cleanup();
Alexey Perevalovf9494612017-10-05 14:13:20 +03002650
2651 RAMBLOCK_FOREACH(rb) {
2652 g_free(rb->receivedmap);
2653 rb->receivedmap = NULL;
2654 }
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002655 return 0;
2656}
2657
2658/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01002659 * ram_postcopy_incoming_init: allocate postcopy data structures
2660 *
2661 * Returns 0 for success and negative on error
2662 *
2663 * @mis: current migration incoming state
2664 *
2665 * Allocate data structures etc needed by incoming migration with
2666 * postcopy-ram. postcopy-ram's similarly named
2667 * postcopy_ram_incoming_init() does the work.
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00002668 */
2669int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2670{
Juan Quintelab8c48992017-03-21 17:44:30 +01002671 unsigned long ram_pages = last_ram_page();
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00002672
2673 return postcopy_ram_incoming_init(mis, ram_pages);
2674}
2675
Juan Quintela3d0684b2017-03-23 15:06:39 +01002676/**
2677 * ram_load_postcopy: load a page in the postcopy case
2678 *
2679 * Returns 0 for success or -errno in case of error
2680 *
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002681 * Called in postcopy mode by ram_load().
2682 * rcu_read_lock is taken prior to this being called.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002683 *
2684 * @f: QEMUFile to receive the data from
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002685 */
2686static int ram_load_postcopy(QEMUFile *f)
2687{
2688 int flags = 0, ret = 0;
2689 bool place_needed = false;
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002690 bool matching_page_sizes = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002691 MigrationIncomingState *mis = migration_incoming_get_current();
2692 /* Temporary page that is later 'placed' */
2693 void *postcopy_host_page = postcopy_get_tmp_page(mis);
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002694 void *last_host = NULL;
Dr. David Alan Gilberta3b6ff62015-11-11 14:02:28 +00002695 bool all_zero = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002696
2697 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2698 ram_addr_t addr;
2699 void *host = NULL;
2700 void *page_buffer = NULL;
2701 void *place_source = NULL;
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002702 RAMBlock *block = NULL;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002703 uint8_t ch;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002704
2705 addr = qemu_get_be64(f);
2706 flags = addr & ~TARGET_PAGE_MASK;
2707 addr &= TARGET_PAGE_MASK;
2708
2709 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2710 place_needed = false;
Juan Quintelabb890ed2017-04-28 09:39:55 +02002711 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE)) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002712 block = ram_block_from_stream(f, flags);
zhanghailiang4c4bad42016-01-15 11:37:41 +08002713
2714 host = host_from_ram_block_offset(block, addr);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002715 if (!host) {
2716 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2717 ret = -EINVAL;
2718 break;
2719 }
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002720 matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002721 /*
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002722 * Postcopy requires that we place whole host pages atomically;
2723 * these may be huge pages for RAMBlocks that are backed by
2724 * hugetlbfs.
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002725 * To make it atomic, the data is read into a temporary page
2726 * that's moved into place later.
2727 * The migration protocol uses, possibly smaller, target pages;
2728 * however, the source ensures it always sends all the components
2729 * of a host page in order.
2730 */
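            /*
             * Worked example, assuming 4KiB target pages and a 2MiB
             * hugetlbfs-backed block: the 512 target pages of one host
             * page land in postcopy_host_page at offsets 0x0000,
             * 0x1000, ..., 0x1ff000, and only the arrival of the last
             * one triggers a single atomic placement of the whole
             * 2MiB page.
             */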
2731 page_buffer = postcopy_host_page +
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002732 ((uintptr_t)host & (block->page_size - 1));
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002733 /* If all target pages are zero then we can optimise the place */
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002734 if (!((uintptr_t)host & (block->page_size - 1))) {
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002735 all_zero = true;
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002736 } else {
2737 /* not the first target page within the host page */
2738 if (host != (last_host + TARGET_PAGE_SIZE)) {
Markus Armbruster9af9e0f2015-12-18 16:35:19 +01002739 error_report("Non-sequential target page %p/%p",
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002740 host, last_host);
2741 ret = -EINVAL;
2742 break;
2743 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002744 }
2745
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002746
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002747 /*
2748 * If it's the last part of a host page then we place the host
2749 * page
2750 */
2751 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002752 (block->page_size - 1)) == 0;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002753 place_source = postcopy_host_page;
2754 }
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002755 last_host = host;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002756
2757 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
Juan Quintelabb890ed2017-04-28 09:39:55 +02002758 case RAM_SAVE_FLAG_ZERO:
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002759 ch = qemu_get_byte(f);
2760 memset(page_buffer, ch, TARGET_PAGE_SIZE);
2761 if (ch) {
2762 all_zero = false;
2763 }
2764 break;
2765
2766 case RAM_SAVE_FLAG_PAGE:
2767 all_zero = false;
2768 if (!place_needed || !matching_page_sizes) {
2769 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2770 } else {
2771 /* Avoid the intermediate qemu_file copy; postcopy will
2772 * copy the page into place later anyway. This only works
2773 * when the read is done in one go (matching page sizes).
2774 */
2775 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2776 TARGET_PAGE_SIZE);
2777 }
2778 break;
2779 case RAM_SAVE_FLAG_EOS:
2780 /* normal exit */
2781 break;
2782 default:
2783 error_report("Unknown combination of migration flags: %#x"
2784 " (postcopy mode)", flags);
2785 ret = -EINVAL;
2786 }
2787
2788 if (place_needed) {
2789 /* This gets called at the last target page in the host page */
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002790 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
2791
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002792 if (all_zero) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002793 ret = postcopy_place_page_zero(mis, place_dest,
Alexey Perevalov8be46202017-10-05 14:13:18 +03002794 block);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002795 } else {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002796 ret = postcopy_place_page(mis, place_dest,
Alexey Perevalov8be46202017-10-05 14:13:18 +03002797 place_source, block);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002798 }
2799 }
2800 if (!ret) {
2801 ret = qemu_file_get_error(f);
2802 }
2803 }
2804
2805 return ret;
2806}
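/*
 * A hypothetical equivalent of the place_dest computation above, shown
 * as a mask for clarity: when place_needed is set, "host" points at
 * the last target page of the host page, so subtracting
 * block->page_size - TARGET_PAGE_SIZE is the same as rounding down to
 * the host page boundary (page sizes are powers of two).
 */
static inline void *sketch_host_page_start(void *host, size_t host_page_size)
{
    return (void *)((uintptr_t)host & ~(uintptr_t)(host_page_size - 1));
}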
2807
Juan Quintela56e93d22015-05-07 19:33:31 +02002808static int ram_load(QEMUFile *f, void *opaque, int version_id)
2809{
Juan Quintelaedc60122016-11-02 12:40:46 +01002810 int flags = 0, ret = 0, invalid_flags = 0;
Juan Quintela56e93d22015-05-07 19:33:31 +02002811 static uint64_t seq_iter;
2812 int len = 0;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002813 /*
2814 * If the system is running in postcopy mode, page inserts into host memory
2815 * must be atomic.
2816 */
2817 bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00002818 /* ADVISE is sent earlier; it indicates the source has postcopy enabled */
2819 bool postcopy_advised = postcopy_state_get() >= POSTCOPY_INCOMING_ADVISE;
Juan Quintela56e93d22015-05-07 19:33:31 +02002820
2821 seq_iter++;
2822
2823 if (version_id != 4) {
2824 ret = -EINVAL;
2825 }
2826
Juan Quintelaedc60122016-11-02 12:40:46 +01002827 if (!migrate_use_compression()) {
2828 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
2829 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002830 /* This RCU critical section can be very long running.
2831 * If RCU reclamation triggered by this code becomes frequent,
2832 * it will be necessary to reduce the granularity of this
2833 * critical section.
2834 */
2835 rcu_read_lock();
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002836
2837 if (postcopy_running) {
2838 ret = ram_load_postcopy(f);
2839 }
2840
2841 while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002842 ram_addr_t addr, total_ram_bytes;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002843 void *host = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002844 uint8_t ch;
2845
2846 addr = qemu_get_be64(f);
2847 flags = addr & ~TARGET_PAGE_MASK;
2848 addr &= TARGET_PAGE_MASK;
2849
Juan Quintelaedc60122016-11-02 12:40:46 +01002850 if (flags & invalid_flags) {
2851 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
2852 error_report("Received an unexpected compressed page");
2853 }
2854
2855 ret = -EINVAL;
2856 break;
2857 }
2858
Juan Quintelabb890ed2017-04-28 09:39:55 +02002859 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002860 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08002861 RAMBlock *block = ram_block_from_stream(f, flags);
2862
2863 host = host_from_ram_block_offset(block, addr);
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002864 if (!host) {
2865 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2866 ret = -EINVAL;
2867 break;
2868 }
Alexey Perevalovf9494612017-10-05 14:13:20 +03002869 ramblock_recv_bitmap_set(block, host);
Dr. David Alan Gilbert1db9d8e2017-04-26 19:37:21 +01002870 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002871 }
2872
Juan Quintela56e93d22015-05-07 19:33:31 +02002873 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2874 case RAM_SAVE_FLAG_MEM_SIZE:
2875 /* Synchronize RAM block list */
2876 total_ram_bytes = addr;
2877 while (!ret && total_ram_bytes) {
2878 RAMBlock *block;
Juan Quintela56e93d22015-05-07 19:33:31 +02002879 char id[256];
2880 ram_addr_t length;
2881
2882 len = qemu_get_byte(f);
2883 qemu_get_buffer(f, (uint8_t *)id, len);
2884 id[len] = 0;
2885 length = qemu_get_be64(f);
2886
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002887 block = qemu_ram_block_by_name(id);
2888 if (block) {
2889 if (length != block->used_length) {
2890 Error *local_err = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002891
Gongleifa53a0e2016-05-10 10:04:59 +08002892 ret = qemu_ram_resize(block, length,
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002893 &local_err);
2894 if (local_err) {
2895 error_report_err(local_err);
Juan Quintela56e93d22015-05-07 19:33:31 +02002896 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002897 }
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00002898 /* For postcopy we need to check hugepage sizes match */
2899 if (postcopy_advised &&
2900 block->page_size != qemu_host_page_size) {
2901 uint64_t remote_page_size = qemu_get_be64(f);
2902 if (remote_page_size != block->page_size) {
2903 error_report("Mismatched RAM page size %s "
2904 "(local) %zd != %" PRId64,
2905 id, block->page_size,
2906 remote_page_size);
2907 ret = -EINVAL;
2908 }
2909 }
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002910 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
2911 block->idstr);
2912 } else {
Juan Quintela56e93d22015-05-07 19:33:31 +02002913 error_report("Unknown ramblock \"%s\", cannot "
2914 "accept migration", id);
2915 ret = -EINVAL;
2916 }
2917
2918 total_ram_bytes -= length;
2919 }
2920 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002921
Juan Quintelabb890ed2017-04-28 09:39:55 +02002922 case RAM_SAVE_FLAG_ZERO:
Juan Quintela56e93d22015-05-07 19:33:31 +02002923 ch = qemu_get_byte(f);
2924 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
2925 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002926
Juan Quintela56e93d22015-05-07 19:33:31 +02002927 case RAM_SAVE_FLAG_PAGE:
Juan Quintela56e93d22015-05-07 19:33:31 +02002928 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
2929 break;
Juan Quintela56e93d22015-05-07 19:33:31 +02002930
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002931 case RAM_SAVE_FLAG_COMPRESS_PAGE:
Juan Quintela56e93d22015-05-07 19:33:31 +02002932 len = qemu_get_be32(f);
2933 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
2934 error_report("Invalid compressed data length: %d", len);
2935 ret = -EINVAL;
2936 break;
2937 }
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00002938 decompress_data_with_multi_threads(f, host, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002939 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00002940
Juan Quintela56e93d22015-05-07 19:33:31 +02002941 case RAM_SAVE_FLAG_XBZRLE:
Juan Quintela56e93d22015-05-07 19:33:31 +02002942 if (load_xbzrle(f, addr, host) < 0) {
2943 error_report("Failed to decompress XBZRLE page at "
2944 RAM_ADDR_FMT, addr);
2945 ret = -EINVAL;
2946 break;
2947 }
2948 break;
2949 case RAM_SAVE_FLAG_EOS:
2950 /* normal exit */
2951 break;
2952 default:
2953 if (flags & RAM_SAVE_FLAG_HOOK) {
Dr. David Alan Gilbert632e3a52015-06-11 18:17:23 +01002954 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
Juan Quintela56e93d22015-05-07 19:33:31 +02002955 } else {
2956 error_report("Unknown combination of migration flags: %#x",
2957 flags);
2958 ret = -EINVAL;
2959 }
2960 }
2961 if (!ret) {
2962 ret = qemu_file_get_error(f);
2963 }
2964 }
2965
Liang Li5533b2e2016-05-05 15:32:52 +08002966 wait_for_decompress_done();
Juan Quintela56e93d22015-05-07 19:33:31 +02002967 rcu_read_unlock();
Juan Quintela55c44462017-01-23 22:32:05 +01002968 trace_ram_load_complete(ret, seq_iter);
Juan Quintela56e93d22015-05-07 19:33:31 +02002969 return ret;
2970}
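/*
 * For reference, a sketch of the record layout that ram_load()
 * consumes, reconstructed from the parsing above (the save side of
 * this file is authoritative):
 *
 *   be64 header: page address, with the RAM_SAVE_FLAG_* bits in the
 *                low, sub-page bits (for MEM_SIZE the address field
 *                carries the total RAM size instead)
 *   for ZERO/PAGE/COMPRESS_PAGE/XBZRLE, unless RAM_SAVE_FLAG_CONTINUE
 *   is set: [u8 len][len bytes of block id]
 *   payload, per flag:
 *     RAM_SAVE_FLAG_ZERO           u8 fill byte (only zero is supported)
 *     RAM_SAVE_FLAG_PAGE           TARGET_PAGE_SIZE raw bytes
 *     RAM_SAVE_FLAG_COMPRESS_PAGE  be32 length, then compressed data
 *     RAM_SAVE_FLAG_XBZRLE         XBZRLE-encoded page (see load_xbzrle)
 *     RAM_SAVE_FLAG_MEM_SIZE       per block: [u8 len][id][be64 length]
 *     RAM_SAVE_FLAG_EOS            no payload; ends the section
 */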
2971
Vladimir Sementsov-Ogievskiyc6467622017-07-10 19:30:14 +03002972static bool ram_has_postcopy(void *opaque)
2973{
2974 return migrate_postcopy_ram();
2975}
2976
Juan Quintela56e93d22015-05-07 19:33:31 +02002977static SaveVMHandlers savevm_ram_handlers = {
Juan Quintela9907e842017-06-28 11:52:24 +02002978 .save_setup = ram_save_setup,
Juan Quintela56e93d22015-05-07 19:33:31 +02002979 .save_live_iterate = ram_save_iterate,
Dr. David Alan Gilbert763c9062015-11-05 18:11:00 +00002980 .save_live_complete_postcopy = ram_save_complete,
Dr. David Alan Gilberta3e06c32015-11-05 18:10:41 +00002981 .save_live_complete_precopy = ram_save_complete,
Vladimir Sementsov-Ogievskiyc6467622017-07-10 19:30:14 +03002982 .has_postcopy = ram_has_postcopy,
Juan Quintela56e93d22015-05-07 19:33:31 +02002983 .save_live_pending = ram_save_pending,
2984 .load_state = ram_load,
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002985 .save_cleanup = ram_save_cleanup,
2986 .load_setup = ram_load_setup,
2987 .load_cleanup = ram_load_cleanup,
Juan Quintela56e93d22015-05-07 19:33:31 +02002988};
2989
2990void ram_mig_init(void)
2991{
2992 qemu_mutex_init(&XBZRLE.lock);
Juan Quintela6f37bb82017-03-13 19:26:29 +01002993 register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
Juan Quintela56e93d22015-05-07 19:33:31 +02002994}
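/*
 * Registration sketch: a subsystem wanting a similar live-migrated
 * section would follow the same pattern as ram_mig_init() above (all
 * "foo" names are hypothetical):
 *
 *     static SaveVMHandlers savevm_foo_handlers = {
 *         .save_setup = foo_save_setup,
 *         .save_live_iterate = foo_save_iterate,
 *         .load_state = foo_load,
 *     };
 *
 *     register_savevm_live(NULL, "foo", 0, 1, &savevm_foo_handlers,
 *                          &foo_state);
 */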