/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include <zlib.h>
#include "qapi-event.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "exec/address-spaces.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

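/*
 * These flags travel in the low bits of the 64-bit offset word that
 * save_page_header() writes: page offsets are TARGET_PAGE_SIZE aligned,
 * so the low bits are free.  A minimal sketch of how a receiver splits
 * the word back apart (illustrative only, not part of the build):
 *
 *     uint64_t word   = qemu_get_be64(f);
 *     uint64_t flags  = word & ~TARGET_PAGE_MASK;  // RAM_SAVE_FLAG_*
 *     ram_addr_t addr = word & TARGET_PAGE_MASK;   // page-aligned offset
 */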
static uint8_t *ZERO_TARGET_PAGE;

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock.
 *
 * Returns the new cache size, rounded down to a power of two, or
 * negative in case of error.
 *
 * @new_size: new cache size
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
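/*
 * Illustrative call (hypothetical size; the real caller is
 * qmp_migrate_set_cache_size).  The cache works on power-of-two sizes,
 * which is why the result goes through pow2floor():
 *
 *     int64_t got = xbzrle_cache_resize(100 * 1024 * 1024);
 *     // got == pow2floor(100 MiB) == 64 MiB, or negative on error
 */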

/* State of RAM for migration */
struct RAMState {
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last offset we have sent data from */
    ram_addr_t last_offset;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times in a row the dirty rate has been too high */
    int dirty_rate_high_cnt;
    /* How many times we have synchronized the bitmap */
    uint64_t bitmap_sync_count;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at the start of the current period */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since the start of the current period */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
};
typedef struct RAMState RAMState;

static RAMState ram_state;

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

static QemuMutex migration_bitmap_mutex;
static uint64_t migration_dirty_pages;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current offset to search from */
    ram_addr_t   offset;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

static struct BitmapRcu {
    struct rcu_head rcu;
    /* Main migration bitmap */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once
     * only maintained and used in postcopy at the moment
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
} *migration_bitmap_rcu;

struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static bool compression_switch;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset);

static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, block, offset);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    compression_switch = true;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the first page sent from its block, it also writes the
 * block identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;

    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
    }
    return size;
}
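/*
 * Resulting wire layout, derived from the code above:
 *
 *     8 bytes   offset, big endian, RAM_SAVE_FLAG_* in the low bits
 *     1 byte    length of idstr   \ only present when
 *     n bytes   idstr             / RAM_SAVE_FLAG_CONTINUE is clear
 */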

/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce amount of guest cpu execution to hopefully slow down memory
 * writes. If guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
    }
}
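/*
 * Worked example, assuming cpu_throttle_initial = 20 and
 * cpu_throttle_increment = 10 (hypothetical parameter values):
 * successive calls request 20%, 30%, 40%, ... of guest CPU time to be
 * taken away, within whatever bounds cpu_throttle_set() enforces.
 */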

/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 rs->bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             rs->bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}

/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: starting address (typically so we can continue from previous page)
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static inline
ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                       ram_addr_t start,
                                       ram_addr_t *ram_addr_abs)
{
    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t rb_size = rb->used_length;
    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
    unsigned long *bitmap;

    unsigned long next;

    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    if (rs->ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(bitmap, size, nr);
    }

    *ram_addr_abs = next << TARGET_PAGE_BITS;
    return (next - base) << TARGET_PAGE_BITS;
}
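/*
 * Note the two result channels above: the return value is the offset
 * relative to rb, while *ram_addr_abs is the same page in the global
 * ram_addr space, so (both page aligned):
 *
 *     *ram_addr_abs == rb->offset + returned offset
 *
 * because 'next' is a global page number and 'base' is rb's first
 * page number.
 */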

static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;
    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;

    ret = test_and_clear_bit(nr, bitmap);

    if (ret) {
        migration_dirty_pages--;
    }
    return ret;
}

static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start,
                                        ram_addr_t length)
{
    unsigned long *bitmap;
    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    migration_dirty_pages += cpu_physical_memory_sync_dirty_bitmap(bitmap,
                             start, length, &rs->num_dirty_pages_period);
}

/* Fix me: there are too many global variables used in migration process. */
static uint64_t iterations_prev;

static void migration_bitmap_sync_init(RAMState *rs)
{
    rs->time_last_bitmap_sync = 0;
    rs->bytes_xfer_prev = 0;
    rs->num_dirty_pages_period = 0;
    rs->xbzrle_cache_miss_prev = 0;
    iterations_prev = 0;
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        summary |= block->page_size;
    }

    return summary;
}

static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    uint64_t bytes_xfer_now;

    rs->bitmap_sync_count++;

    if (!rs->bytes_xfer_prev) {
        rs->bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&migration_bitmap_mutex);
    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(rs, block->offset, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&migration_bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are more than 50% of the
               approx. amount of bytes that just got transferred since the
               last time we were in this routine. If that happens repeatedly,
               start or increase throttling */
            bytes_xfer_now = ram_bytes_transferred();

            if (s->dirty_pages_rate &&
                (rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
                (rs->dirty_rate_high_cnt++ >= 2)) {
                trace_migration_throttle();
                rs->dirty_rate_high_cnt = 0;
                mig_throttle_guest_down();
            }
            rs->bytes_xfer_prev = bytes_xfer_now;
        }

        if (migrate_use_xbzrle()) {
            if (iterations_prev != acct_info.iterations) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             rs->xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            rs->xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = rs->num_dirty_pages_period * 1000
            / (end_time - rs->time_last_bitmap_sync);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
    }
    s->dirty_sync_count = rs->bitmap_sync_count;
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
    }
}

/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p, uint64_t *bytes_transferred)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        *bytes_transferred += 1;
        pages = 1;
    }

    return pages;
}

static void ram_release_pages(MigrationState *ms, const char *rbname,
                              uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy(ms)) {
        return;
    }

    ram_discard_range(NULL, rbname, offset, pages << TARGET_PAGE_BITS);
}

/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written:
 *         < 0 - error
 *        >= 0 - number of pages written - this might legally be 0
 *               if xbzrle noticed the page was the same
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @f: QEMUFile where to send the data
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
                         PageSearchStatus *pss, bool last_stage,
                         uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    /* When in doubt, send the page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        pages = save_zero_page(f, block, offset, p, bytes_transferred);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(rs, current_addr);
            ram_release_pages(ms, block->idstr, pss->offset, pages);
        } else if (!rs->ram_bulk_stage &&
                   !migration_in_postcopy(ms) && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(rs, f, &p, current_addr, block,
                                     offset, last_stage, bytes_transferred);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &
                                  migration_in_postcopy(ms));
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset)
{
    int bytes_sent, blen;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(f, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    if (blen < 0) {
        bytes_sent = 0;
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
        ram_release_pages(migrate_get_current(), block->idstr,
                          offset & TARGET_PAGE_MASK, 1);
    }

    return bytes_sent;
}

static uint64_t bytes_transferred;

static void flush_compressed_data(QEMUFile *f)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(f, comp_param[idx].file);
            bytes_transferred += len;
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}

static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
                                           ram_addr_t offset,
                                           uint64_t *bytes_transferred)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                comp_param[idx].done = false;
                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
                qemu_mutex_lock(&comp_param[idx].mutex);
                set_compress_params(&comp_param[idx], block, offset);
                qemu_cond_signal(&comp_param[idx].cond);
                qemu_mutex_unlock(&comp_param[idx].mutex);
                pages = 1;
                acct_info.norm_pages++;
                *bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}
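/*
 * Summary of the handshake with do_data_compress(), derived from the
 * code above (for orientation only): a worker is idle while param->done
 * is true.  The migration thread claims it by clearing 'done', drains
 * the worker's buffered QEMUFile output, publishes block/offset under
 * param->mutex and signals param->cond.  The worker compresses the
 * page, then sets 'done' and signals comp_done_cond under
 * comp_done_lock, waking any waiter above.
 */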

/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @f: QEMUFile where to send the data
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
                                    QEMUFile *f,
                                    PageSearchStatus *pss, bool last_stage,
                                    uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit = 0;
    uint8_t *p;
    int ret, blen;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in the last block should have been
         * sent out. Keeping this order is important, because the 'cont'
         * flag is used to avoid resending the block name.
         */
        if (block != rs->last_sent_block) {
            flush_compressed_data(f);
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                /* Make sure the first page is sent out before other pages */
                bytes_xmit = save_page_header(f, block, offset |
                                              RAM_SAVE_FLAG_COMPRESS_PAGE);
                blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                                 migrate_compress_level());
                if (blen > 0) {
                    *bytes_transferred += bytes_xmit + blen;
                    acct_info.norm_pages++;
                    pages = 1;
                } else {
                    qemu_file_set_error(f, blen);
                    error_report("compressed data failed!");
                }
            }
            if (pages > 0) {
                ram_release_pages(ms, block->idstr, pss->offset, pages);
            }
        } else {
            offset |= RAM_SAVE_FLAG_CONTINUE;
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(f, block, offset,
                                                        bytes_transferred);
            } else {
                ram_release_pages(ms, block->idstr, pss->offset, pages);
            }
        }
    }

    return pages;
}

/**
 * find_dirty_block: find the next dirty page and update any state
 *                   associated with the search process.
 *
 * Returns whether a page was found
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
                             bool *again, ram_addr_t *ram_addr_abs)
{
    pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset,
                                              ram_addr_abs);
    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->offset >= rs->last_offset) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if (pss->offset >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->offset = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(f);
                compression_switch = false;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}

/**
 * unqueue_page: gets a page off the queue
 *
 * Helper for 'get_queued_page'
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @ms: current migration state
 * @offset: used to return the offset within the RAMBlock
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
                              ram_addr_t *ram_addr_abs)
{
    RAMBlock *block = NULL;

    qemu_mutex_lock(&ms->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
        struct MigrationSrcPageRequest *entry =
                                QSIMPLEQ_FIRST(&ms->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;
        *ram_addr_abs = (entry->offset + entry->rb->offset) &
                        TARGET_PAGE_MASK;

        if (entry->len > TARGET_PAGE_SIZE) {
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
            g_free(entry);
        }
    }
    qemu_mutex_unlock(&ms->src_page_req_mutex);

    return block;
}

/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns whether a queued page was found
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @pss: data about the state of the current dirty page scan
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static bool get_queued_page(RAMState *rs, MigrationState *ms,
                            PageSearchStatus *pss,
                            ram_addr_t *ram_addr_abs)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(ms, &offset, ram_addr_abs);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long *bitmap;
            bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
            dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(
                    block->idstr, (uint64_t)offset,
                    (uint64_t)*ram_addr_abs,
                    test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
                         atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
            } else {
                trace_get_queued_page(block->idstr,
                                      (uint64_t)offset,
                                      (uint64_t)*ram_addr_abs);
            }
        }

    } while (block && !dirty);

    if (block) {
        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * in (migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, that's no longer true.
         */
        rs->ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->offset = offset;
    }

    return !!block;
}

/**
 * migration_page_queue_free: drop any remaining pages in the ram
 *                            request queue
 *
 * It should be empty at the end anyway, but in error cases there may
 * be some left. If any pages are left, we drop them.
 *
 * @ms: current migration state
 */
void migration_page_queue_free(MigrationState *ms)
{
    struct MigrationSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    rcu_read_lock();
    QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
        g_free(mspr);
    }
    rcu_read_unlock();
}

/**
 * ram_save_queue_pages: queue the page for transmission
 *
 * A request from postcopy destination for example.
 *
 * Returns zero on success or negative on error
 *
 * @ms: current migration state
 * @rbname: Name of the RAMBlock of the request. NULL means the
 *          same as the last one.
 * @start: starting address from the start of the RAMBlock
 * @len: length (in bytes) to send
 */
int ram_save_queue_pages(MigrationState *ms, const char *rbname,
                         ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;

    ms->postcopy_requests++;
    rcu_read_lock();
    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = ms->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            goto err;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            goto err;
        }
        ms->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start + len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        goto err;
    }

    struct MigrationSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct MigrationSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&ms->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
    qemu_mutex_unlock(&ms->src_page_req_mutex);
    rcu_read_unlock();

    return 0;

err:
    rcu_read_unlock();
    return -1;
}
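/*
 * Illustrative call, roughly what servicing a postcopy page request
 * looks like (block name and range are hypothetical):
 *
 *     if (ram_save_queue_pages(ms, "pc.ram", 0x2000, TARGET_PAGE_SIZE)) {
 *         // malformed request or unknown RAMBlock
 *     }
 *
 * The queued range is later consumed page by page by unqueue_page().
 */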
1305
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001306/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001307 * ram_save_target_page: save one target page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001308 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001309 * Returns the number of pages written
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001310 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001311 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001312 * @ms: current migration state
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001313 * @f: QEMUFile where to send the data
Juan Quintela3d0684b2017-03-23 15:06:39 +01001314 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001315 * @last_stage: if we are at the completion stage
 1316 * @bytes_transferred: increased by the number of transferred bytes
Juan Quintela3d0684b2017-03-23 15:06:39 +01001317 * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001318 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001319static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
zhanghailianga08f6892016-01-15 11:37:44 +08001320 PageSearchStatus *pss,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001321 bool last_stage,
1322 uint64_t *bytes_transferred,
1323 ram_addr_t dirty_ram_abs)
1324{
1325 int res = 0;
1326
 1327 /* Check if the page is dirty and if it is, send it */
1328 if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
1329 unsigned long *unsentmap;
1330 if (compression_switch && migrate_use_compression()) {
Juan Quintela6f37bb82017-03-13 19:26:29 +01001331 res = ram_save_compressed_page(rs, ms, f, pss,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001332 last_stage,
1333 bytes_transferred);
1334 } else {
Juan Quintela6f37bb82017-03-13 19:26:29 +01001335 res = ram_save_page(rs, ms, f, pss, last_stage,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001336 bytes_transferred);
1337 }
1338
1339 if (res < 0) {
1340 return res;
1341 }
1342 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1343 if (unsentmap) {
1344 clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1345 }
Dr. David Alan Gilbert3fd3c4b2015-12-10 16:31:46 +00001346 /* Only update last_sent_block if a block was actually sent; xbzrle
1347 * might have decided the page was identical so didn't bother writing
1348 * to the stream.
1349 */
1350 if (res > 0) {
Juan Quintela6f37bb82017-03-13 19:26:29 +01001351 rs->last_sent_block = pss->block;
Dr. David Alan Gilbert3fd3c4b2015-12-10 16:31:46 +00001352 }
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001353 }
1354
1355 return res;
1356}
1357
1358/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001359 * ram_save_host_page: save a whole host page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001360 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001361 * Starting at *offset send pages up to the end of the current host
1362 * page. It's valid for the initial offset to point into the middle of
 1363 * a host page, in which case the remainder of the host page is sent.
1364 * Only dirty target pages are sent. Note that the host page size may
1365 * be a huge page for this block.
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001366 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001367 * Returns the number of pages written or negative on error
1368 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001369 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001370 * @ms: current migration state
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001371 * @f: QEMUFile where to send the data
Juan Quintela3d0684b2017-03-23 15:06:39 +01001372 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001373 * @last_stage: if we are at the completion stage
 1374 * @bytes_transferred: increased by the number of transferred bytes
1375 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1376 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001377static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
zhanghailianga08f6892016-01-15 11:37:44 +08001378 PageSearchStatus *pss,
1379 bool last_stage,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001380 uint64_t *bytes_transferred,
1381 ram_addr_t dirty_ram_abs)
1382{
1383 int tmppages, pages = 0;
Dr. David Alan Gilbert4c011c32017-02-24 18:28:39 +00001384 size_t pagesize = qemu_ram_pagesize(pss->block);
1385
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001386 do {
Juan Quintela6f37bb82017-03-13 19:26:29 +01001387 tmppages = ram_save_target_page(rs, ms, f, pss, last_stage,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001388 bytes_transferred, dirty_ram_abs);
1389 if (tmppages < 0) {
1390 return tmppages;
1391 }
1392
1393 pages += tmppages;
zhanghailianga08f6892016-01-15 11:37:44 +08001394 pss->offset += TARGET_PAGE_SIZE;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001395 dirty_ram_abs += TARGET_PAGE_SIZE;
Dr. David Alan Gilbert4c011c32017-02-24 18:28:39 +00001396 } while (pss->offset & (pagesize - 1));
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001397
1398 /* The offset we leave with is the last one we looked at */
zhanghailianga08f6892016-01-15 11:37:44 +08001399 pss->offset -= TARGET_PAGE_SIZE;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001400 return pages;
1401}
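
/*
 * Editorial sketch (hypothetical, assumes a 2MB hugepage RAMBlock and 4KB
 * target pages): the do/while above always stops on a host-page boundary,
 * so starting on the last target page of a huge page sends just that page.
 */
static void example_host_page_walk(void)
{
    const size_t pagesize = 2 * 1024 * 1024;            /* assumed HPS */
    ram_addr_t offset = pagesize - TARGET_PAGE_SIZE;    /* last TP in HP */
    int pages = 0;

    do {
        pages++;                        /* stands in for one page saved */
        offset += TARGET_PAGE_SIZE;
    } while (offset & (pagesize - 1));  /* 0 exactly at the HP boundary */

    assert(pages == 1);
}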
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001402
1403/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001404 * ram_find_and_save_block: finds a dirty page and sends it to f
Juan Quintela56e93d22015-05-07 19:33:31 +02001405 *
1406 * Called within an RCU critical section.
1407 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001408 * Returns the number of pages written where zero means no dirty pages
Juan Quintela56e93d22015-05-07 19:33:31 +02001409 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001410 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02001411 * @f: QEMUFile where to send the data
1412 * @last_stage: if we are at the completion stage
 1413 * @bytes_transferred: increased by the number of transferred bytes
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001414 *
1415 * On systems where host-page-size > target-page-size it will send all the
1416 * pages in a host page that are dirty.
Juan Quintela56e93d22015-05-07 19:33:31 +02001417 */
1418
Juan Quintela6f37bb82017-03-13 19:26:29 +01001419static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage,
Juan Quintela56e93d22015-05-07 19:33:31 +02001420 uint64_t *bytes_transferred)
1421{
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001422 PageSearchStatus pss;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001423 MigrationState *ms = migrate_get_current();
Juan Quintela56e93d22015-05-07 19:33:31 +02001424 int pages = 0;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001425 bool again, found;
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001426 ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1427 ram_addr_t space */
Juan Quintela56e93d22015-05-07 19:33:31 +02001428
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05301429 /* No dirty page as there is zero RAM */
1430 if (!ram_bytes_total()) {
1431 return pages;
1432 }
1433
Juan Quintela6f37bb82017-03-13 19:26:29 +01001434 pss.block = rs->last_seen_block;
1435 pss.offset = rs->last_offset;
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001436 pss.complete_round = false;
1437
1438 if (!pss.block) {
1439 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1440 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001441
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001442 do {
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001443 again = true;
Juan Quintela6f37bb82017-03-13 19:26:29 +01001444 found = get_queued_page(rs, ms, &pss, &dirty_ram_abs);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001445
1446 if (!found) {
1447 /* priority queue empty, so just search for something dirty */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001448 found = find_dirty_block(rs, f, &pss, &again, &dirty_ram_abs);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001449 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001450
1451 if (found) {
Juan Quintela6f37bb82017-03-13 19:26:29 +01001452 pages = ram_save_host_page(rs, ms, f, &pss,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001453 last_stage, bytes_transferred,
1454 dirty_ram_abs);
Juan Quintela56e93d22015-05-07 19:33:31 +02001455 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001456 } while (!pages && again);
Juan Quintela56e93d22015-05-07 19:33:31 +02001457
Juan Quintela6f37bb82017-03-13 19:26:29 +01001458 rs->last_seen_block = pss.block;
1459 rs->last_offset = pss.offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02001460
1461 return pages;
1462}
1463
1464void acct_update_position(QEMUFile *f, size_t size, bool zero)
1465{
1466 uint64_t pages = size / TARGET_PAGE_SIZE;
1467 if (zero) {
1468 acct_info.dup_pages += pages;
1469 } else {
1470 acct_info.norm_pages += pages;
1471 bytes_transferred += size;
1472 qemu_update_position(f, size);
1473 }
1474}
1475
1476static ram_addr_t ram_save_remaining(void)
1477{
1478 return migration_dirty_pages;
1479}
1480
1481uint64_t ram_bytes_remaining(void)
1482{
1483 return ram_save_remaining() * TARGET_PAGE_SIZE;
1484}
1485
1486uint64_t ram_bytes_transferred(void)
1487{
1488 return bytes_transferred;
1489}
1490
1491uint64_t ram_bytes_total(void)
1492{
1493 RAMBlock *block;
1494 uint64_t total = 0;
1495
1496 rcu_read_lock();
1497 QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1498 total += block->used_length;
1499 rcu_read_unlock();
1500 return total;
1501}
1502
1503void free_xbzrle_decoded_buf(void)
1504{
1505 g_free(xbzrle_decoded_buf);
1506 xbzrle_decoded_buf = NULL;
1507}
1508
Denis V. Lunev60be6342015-09-28 14:41:58 +03001509static void migration_bitmap_free(struct BitmapRcu *bmap)
1510{
1511 g_free(bmap->bmap);
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001512 g_free(bmap->unsentmap);
Denis V. Lunev60be6342015-09-28 14:41:58 +03001513 g_free(bmap);
1514}
1515
Liang Li6ad2a212015-11-02 15:37:03 +08001516static void ram_migration_cleanup(void *opaque)
Juan Quintela56e93d22015-05-07 19:33:31 +02001517{
Li Zhijian2ff64032015-07-02 20:18:05 +08001518 /* The caller must hold the iothread lock or be in a bh, so there is
 1519 * no write race against this migration_bitmap
1520 */
Denis V. Lunev60be6342015-09-28 14:41:58 +03001521 struct BitmapRcu *bitmap = migration_bitmap_rcu;
1522 atomic_rcu_set(&migration_bitmap_rcu, NULL);
Li Zhijian2ff64032015-07-02 20:18:05 +08001523 if (bitmap) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001524 memory_global_dirty_log_stop();
Denis V. Lunev60be6342015-09-28 14:41:58 +03001525 call_rcu(bitmap, migration_bitmap_free, rcu);
Juan Quintela56e93d22015-05-07 19:33:31 +02001526 }
1527
1528 XBZRLE_cache_lock();
1529 if (XBZRLE.cache) {
1530 cache_fini(XBZRLE.cache);
1531 g_free(XBZRLE.encoded_buf);
1532 g_free(XBZRLE.current_buf);
Vijaya Kumar Kadb65de2016-10-24 16:26:49 +01001533 g_free(ZERO_TARGET_PAGE);
Juan Quintela56e93d22015-05-07 19:33:31 +02001534 XBZRLE.cache = NULL;
1535 XBZRLE.encoded_buf = NULL;
1536 XBZRLE.current_buf = NULL;
1537 }
1538 XBZRLE_cache_unlock();
1539}
1540
Juan Quintela6f37bb82017-03-13 19:26:29 +01001541static void ram_state_reset(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02001542{
Juan Quintela6f37bb82017-03-13 19:26:29 +01001543 rs->last_seen_block = NULL;
1544 rs->last_sent_block = NULL;
1545 rs->last_offset = 0;
1546 rs->last_version = ram_list.version;
1547 rs->ram_bulk_stage = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02001548}
1549
1550#define MAX_WAIT 50 /* ms, half buffered_file limit */
1551
Li Zhijiandd631692015-07-02 20:18:06 +08001552void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1553{
 1554 /* Called from the qemu main thread, so there is
 1555 * no write race against this migration_bitmap
1556 */
Denis V. Lunev60be6342015-09-28 14:41:58 +03001557 if (migration_bitmap_rcu) {
1558 struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
1559 bitmap = g_new(struct BitmapRcu, 1);
1560 bitmap->bmap = bitmap_new(new);
Li Zhijiandd631692015-07-02 20:18:06 +08001561
 1562 /* Prevent bits in migration_bitmap from being set
 1563 * by migration_bitmap_sync_range() at the same time.
 1564 * Migration remains safe if bits are cleared
 1565 * at the same time.
1566 */
1567 qemu_mutex_lock(&migration_bitmap_mutex);
Denis V. Lunev60be6342015-09-28 14:41:58 +03001568 bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1569 bitmap_set(bitmap->bmap, old, new - old);
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001570
 1571 /* We don't have a way to safely extend the unsentmap
 1572 * with RCU; so mark it as missing, and entry to postcopy
 1573 * will fail.
1574 */
1575 bitmap->unsentmap = NULL;
1576
Denis V. Lunev60be6342015-09-28 14:41:58 +03001577 atomic_rcu_set(&migration_bitmap_rcu, bitmap);
Li Zhijiandd631692015-07-02 20:18:06 +08001578 qemu_mutex_unlock(&migration_bitmap_mutex);
1579 migration_dirty_pages += new - old;
Denis V. Lunev60be6342015-09-28 14:41:58 +03001580 call_rcu(old_bitmap, migration_bitmap_free, rcu);
Li Zhijiandd631692015-07-02 20:18:06 +08001581 }
1582}
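
/*
 * Editorial sketch (hypothetical helper): readers pair with the writer
 * above through RCU. Load the pointer once with atomic_rcu_read() inside
 * the read-side critical section and never dereference it after unlock.
 */
static bool example_page_is_dirty(unsigned long page)
{
    bool dirty;

    rcu_read_lock();
    dirty = test_bit(page, atomic_rcu_read(&migration_bitmap_rcu)->bmap);
    rcu_read_unlock();
    return dirty;
}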
Juan Quintela56e93d22015-05-07 19:33:31 +02001583
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001584/*
1585 * 'expected' is the value you expect the bitmap mostly to be full
1586 * of; it won't bother printing lines that are all this value.
1587 * If 'todump' is null the migration bitmap is dumped.
1588 */
1589void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1590{
1591 int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
1592
1593 int64_t cur;
1594 int64_t linelen = 128;
1595 char linebuf[129];
1596
1597 if (!todump) {
1598 todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1599 }
1600
1601 for (cur = 0; cur < ram_pages; cur += linelen) {
1602 int64_t curb;
1603 bool found = false;
1604 /*
1605 * Last line; catch the case where the line length
1606 * is longer than remaining ram
1607 */
1608 if (cur + linelen > ram_pages) {
1609 linelen = ram_pages - cur;
1610 }
1611 for (curb = 0; curb < linelen; curb++) {
1612 bool thisbit = test_bit(cur + curb, todump);
1613 linebuf[curb] = thisbit ? '1' : '.';
1614 found = found || (thisbit != expected);
1615 }
1616 if (found) {
1617 linebuf[curb] = '\0';
1618 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
1619 }
1620 }
1621}
1622
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001623/* **** functions for postcopy **** */
1624
Pavel Butsykinced1c612017-02-03 18:23:21 +03001625void ram_postcopy_migrated_memory_release(MigrationState *ms)
1626{
1627 struct RAMBlock *block;
1628 unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1629
1630 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1631 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1632 unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
1633 unsigned long run_start = find_next_zero_bit(bitmap, range, first);
1634
1635 while (run_start < range) {
1636 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
1637 ram_discard_range(NULL, block->idstr, run_start << TARGET_PAGE_BITS,
1638 (run_end - run_start) << TARGET_PAGE_BITS);
1639 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
1640 }
1641 }
1642}
1643
Juan Quintela3d0684b2017-03-23 15:06:39 +01001644/**
1645 * postcopy_send_discard_bm_ram: discard a RAMBlock
1646 *
1647 * Returns zero on success
1648 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001649 * Callback from postcopy_each_ram_send_discard for each RAMBlock
1650 * Note: At this point the 'unsentmap' is the processed bitmap combined
1651 * with the dirtymap; so a '1' means it's either dirty or unsent.
Juan Quintela3d0684b2017-03-23 15:06:39 +01001652 *
1653 * @ms: current migration state
1654 * @pds: state for postcopy
1655 * @start: RAMBlock starting page
1656 * @length: RAMBlock size
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001657 */
1658static int postcopy_send_discard_bm_ram(MigrationState *ms,
1659 PostcopyDiscardState *pds,
1660 unsigned long start,
1661 unsigned long length)
1662{
1663 unsigned long end = start + length; /* one after the end */
1664 unsigned long current;
1665 unsigned long *unsentmap;
1666
1667 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1668 for (current = start; current < end; ) {
1669 unsigned long one = find_next_bit(unsentmap, end, current);
1670
1671 if (one <= end) {
1672 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1673 unsigned long discard_length;
1674
1675 if (zero >= end) {
1676 discard_length = end - one;
1677 } else {
1678 discard_length = zero - one;
1679 }
Dr. David Alan Gilbertd688c622016-06-13 12:16:40 +01001680 if (discard_length) {
1681 postcopy_discard_send_range(ms, pds, one, discard_length);
1682 }
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001683 current = one + discard_length;
1684 } else {
1685 current = one;
1686 }
1687 }
1688
1689 return 0;
1690}
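
/*
 * Editorial sketch: the loop above is run-length extraction over a bitmap.
 * For bits 0,3,4,5 set in an 8-bit map it yields the runs (0, len 1) and
 * (3, len 3). A standalone version with printf() standing in for
 * postcopy_discard_send_range() (helper name hypothetical):
 */
static void example_dump_runs(const unsigned long *map, unsigned long end)
{
    unsigned long cur = 0;

    while (cur < end) {
        unsigned long one = find_next_bit(map, end, cur);
        unsigned long zero, len;

        if (one >= end) {
            break;                              /* no more set bits */
        }
        zero = find_next_zero_bit(map, end, one + 1);
        len = MIN(zero, end) - one;
        printf("run: start=%lu len=%lu\n", one, len);
        cur = one + len;
    }
}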
1691
Juan Quintela3d0684b2017-03-23 15:06:39 +01001692/**
1693 * postcopy_each_ram_send_discard: discard all RAMBlocks
1694 *
1695 * Returns 0 for success or negative for error
1696 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001697 * Utility for the outgoing postcopy code.
1698 * Calls postcopy_send_discard_bm_ram for each RAMBlock
1699 * passing it bitmap indexes and name.
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001700 * (qemu_ram_foreach_block ends up passing unscaled lengths
1701 * which would mean postcopy code would have to deal with target page)
Juan Quintela3d0684b2017-03-23 15:06:39 +01001702 *
1703 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001704 */
1705static int postcopy_each_ram_send_discard(MigrationState *ms)
1706{
1707 struct RAMBlock *block;
1708 int ret;
1709
1710 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1711 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1712 PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1713 first,
1714 block->idstr);
1715
1716 /*
1717 * Postcopy sends chunks of bitmap over the wire, but it
1718 * just needs indexes at this point, avoids it having
1719 * target page specific code.
1720 */
1721 ret = postcopy_send_discard_bm_ram(ms, pds, first,
1722 block->used_length >> TARGET_PAGE_BITS);
1723 postcopy_discard_send_finish(ms, pds);
1724 if (ret) {
1725 return ret;
1726 }
1727 }
1728
1729 return 0;
1730}
1731
Juan Quintela3d0684b2017-03-23 15:06:39 +01001732/**
 1733 * postcopy_chunk_hostpages_pass: canonicalize bitmap in host pages
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001734 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001735 * Helper for postcopy_chunk_hostpages; it's called twice to
1736 * canonicalize the two bitmaps, that are similar, but one is
1737 * inverted.
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001738 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001739 * Postcopy requires that all target pages in a hostpage are dirty or
1740 * clean, not a mix. This function canonicalizes the bitmaps.
1741 *
1742 * @ms: current migration state
1743 * @unsent_pass: if true we need to canonicalize partially unsent host pages
1744 * otherwise we need to canonicalize partially dirty host pages
1745 * @block: block that contains the page we want to canonicalize
1746 * @pds: state for postcopy
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001747 */
1748static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1749 RAMBlock *block,
1750 PostcopyDiscardState *pds)
1751{
1752 unsigned long *bitmap;
1753 unsigned long *unsentmap;
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001754 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001755 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1756 unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1757 unsigned long last = first + (len - 1);
1758 unsigned long run_start;
1759
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001760 if (block->page_size == TARGET_PAGE_SIZE) {
1761 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
1762 return;
1763 }
1764
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001765 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1766 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1767
1768 if (unsent_pass) {
1769 /* Find a sent page */
1770 run_start = find_next_zero_bit(unsentmap, last + 1, first);
1771 } else {
1772 /* Find a dirty page */
1773 run_start = find_next_bit(bitmap, last + 1, first);
1774 }
1775
1776 while (run_start <= last) {
1777 bool do_fixup = false;
1778 unsigned long fixup_start_addr;
1779 unsigned long host_offset;
1780
1781 /*
1782 * If the start of this run of pages is in the middle of a host
1783 * page, then we need to fixup this host page.
1784 */
1785 host_offset = run_start % host_ratio;
1786 if (host_offset) {
1787 do_fixup = true;
1788 run_start -= host_offset;
1789 fixup_start_addr = run_start;
1790 /* For the next pass */
1791 run_start = run_start + host_ratio;
1792 } else {
1793 /* Find the end of this run */
1794 unsigned long run_end;
1795 if (unsent_pass) {
1796 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1797 } else {
1798 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1799 }
1800 /*
1801 * If the end isn't at the start of a host page, then the
1802 * run doesn't finish at the end of a host page
1803 * and we need to discard.
1804 */
1805 host_offset = run_end % host_ratio;
1806 if (host_offset) {
1807 do_fixup = true;
1808 fixup_start_addr = run_end - host_offset;
1809 /*
1810 * This host page has gone, the next loop iteration starts
1811 * from after the fixup
1812 */
1813 run_start = fixup_start_addr + host_ratio;
1814 } else {
1815 /*
1816 * No discards on this iteration, next loop starts from
1817 * next sent/dirty page
1818 */
1819 run_start = run_end + 1;
1820 }
1821 }
1822
1823 if (do_fixup) {
1824 unsigned long page;
1825
1826 /* Tell the destination to discard this page */
1827 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1828 /* For the unsent_pass we:
1829 * discard partially sent pages
1830 * For the !unsent_pass (dirty) we:
1831 * discard partially dirty pages that were sent
1832 * (any partially sent pages were already discarded
1833 * by the previous unsent_pass)
1834 */
1835 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1836 host_ratio);
1837 }
1838
1839 /* Clean up the bitmap */
1840 for (page = fixup_start_addr;
1841 page < fixup_start_addr + host_ratio; page++) {
1842 /* All pages in this host page are now not sent */
1843 set_bit(page, unsentmap);
1844
1845 /*
1846 * Remark them as dirty, updating the count for any pages
1847 * that weren't previously dirty.
1848 */
1849 migration_dirty_pages += !test_and_set_bit(page, bitmap);
1850 }
1851 }
1852
1853 if (unsent_pass) {
1854 /* Find the next sent page for the next iteration */
1855 run_start = find_next_zero_bit(unsentmap, last + 1,
1856 run_start);
1857 } else {
1858 /* Find the next dirty page for the next iteration */
1859 run_start = find_next_bit(bitmap, last + 1, run_start);
1860 }
1861 }
1862}
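
/*
 * Editorial sketch (worked numbers, hypothetical helper): with 2MB huge
 * pages and 4KB target pages host_ratio is 512. A run starting at target
 * page 700 has host_offset 700 % 512 == 188, so the pass above rewinds to
 * page 512 and fixes up that whole host page.
 */
static unsigned long example_fixup_start(unsigned long run_start,
                                         unsigned int host_ratio)
{
    unsigned long host_offset = run_start % host_ratio;

    /* Align down to the host page containing run_start */
    return run_start - host_offset;
}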
1863
Juan Quintela3d0684b2017-03-23 15:06:39 +01001864/**
 1865 * postcopy_chunk_hostpages: discard any partially sent host page
1866 *
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001867 * Utility for the outgoing postcopy code.
1868 *
1869 * Discard any partially sent host-page size chunks, mark any partially
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001870 * dirty host-page size chunks as all dirty. In this case the host-page
1871 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001872 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001873 * Returns zero on success
1874 *
1875 * @ms: current migration state
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001876 */
1877static int postcopy_chunk_hostpages(MigrationState *ms)
1878{
Juan Quintela6f37bb82017-03-13 19:26:29 +01001879 RAMState *rs = &ram_state;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001880 struct RAMBlock *block;
1881
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001882 /* Easiest way to make sure we don't resume in the middle of a host-page */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001883 rs->last_seen_block = NULL;
1884 rs->last_sent_block = NULL;
1885 rs->last_offset = 0;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001886
1887 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1888 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1889
1890 PostcopyDiscardState *pds =
1891 postcopy_discard_send_init(ms, first, block->idstr);
1892
1893 /* First pass: Discard all partially sent host pages */
1894 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1895 /*
1896 * Second pass: Ensure that all partially dirty host pages are made
1897 * fully dirty.
1898 */
1899 postcopy_chunk_hostpages_pass(ms, false, block, pds);
1900
1901 postcopy_discard_send_finish(ms, pds);
1902 } /* ram_list loop */
1903
1904 return 0;
1905}
1906
Juan Quintela3d0684b2017-03-23 15:06:39 +01001907/**
1908 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
1909 *
1910 * Returns zero on success
1911 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001912 * Transmit the set of pages to be discarded after precopy to the target
1913 * these are pages that:
1914 * a) Have been previously transmitted but are now dirty again
1915 * b) Pages that have never been transmitted, this ensures that
1916 * any pages on the destination that have been mapped by background
1917 * tasks get discarded (transparent huge pages is the specific concern)
1918 * Hopefully this is pretty sparse
Juan Quintela3d0684b2017-03-23 15:06:39 +01001919 *
1920 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001921 */
1922int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1923{
1924 int ret;
1925 unsigned long *bitmap, *unsentmap;
1926
1927 rcu_read_lock();
1928
1929 /* This should be our last sync, the src is now paused */
Juan Quintela8d820d62017-03-13 19:35:50 +01001930 migration_bitmap_sync(&ram_state);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001931
1932 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1933 if (!unsentmap) {
 1934 /* We don't have a safe way to resize the unsentmap, so
1935 * if the bitmap was resized it will be NULL at this
1936 * point.
1937 */
1938 error_report("migration ram resized during precopy phase");
1939 rcu_read_unlock();
1940 return -EINVAL;
1941 }
1942
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001943 /* Deal with TPS != HPS and huge pages */
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001944 ret = postcopy_chunk_hostpages(ms);
1945 if (ret) {
1946 rcu_read_unlock();
1947 return ret;
1948 }
1949
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001950 /*
1951 * Update the unsentmap to be unsentmap = unsentmap | dirty
1952 */
1953 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1954 bitmap_or(unsentmap, unsentmap, bitmap,
1955 last_ram_offset() >> TARGET_PAGE_BITS);
1956
1957
1958 trace_ram_postcopy_send_discard_bitmap();
1959#ifdef DEBUG_POSTCOPY
1960 ram_debug_dump_bitmap(unsentmap, true);
1961#endif
1962
1963 ret = postcopy_each_ram_send_discard(ms);
1964 rcu_read_unlock();
1965
1966 return ret;
1967}
1968
Juan Quintela3d0684b2017-03-23 15:06:39 +01001969/**
1970 * ram_discard_range: discard dirtied pages at the beginning of postcopy
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001971 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001972 * Returns zero on success
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001973 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001974 * @mis: current migration incoming state
Juan Quintela36449152017-03-23 15:11:59 +01001975 * @rbname: name of the RAMBlock of the request. NULL means the
 1976 * same as the last one.
Juan Quintela3d0684b2017-03-23 15:06:39 +01001977 * @start: RAMBlock starting page
1978 * @length: RAMBlock size
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001979 */
1980int ram_discard_range(MigrationIncomingState *mis,
Juan Quintela36449152017-03-23 15:11:59 +01001981 const char *rbname,
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001982 uint64_t start, size_t length)
1983{
1984 int ret = -1;
1985
Juan Quintela36449152017-03-23 15:11:59 +01001986 trace_ram_discard_range(rbname, start, length);
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00001987
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001988 rcu_read_lock();
Juan Quintela36449152017-03-23 15:11:59 +01001989 RAMBlock *rb = qemu_ram_block_by_name(rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001990
1991 if (!rb) {
Juan Quintela36449152017-03-23 15:11:59 +01001992 error_report("ram_discard_range: Failed to find block '%s'", rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001993 goto err;
1994 }
1995
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00001996 ret = ram_block_discard_range(rb, start, length);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001997
1998err:
1999 rcu_read_unlock();
2000
2001 return ret;
2002}
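
/*
 * Editorial sketch (hypothetical caller): discard the first huge page of
 * a block. Passing mis == NULL is fine here since the lookup goes through
 * qemu_ram_block_by_name(), as ram_postcopy_migrated_memory_release()
 * above already does. The 2MB size is an assumption for illustration.
 */
static int example_discard_first_hugepage(const char *rbname)
{
    return ram_discard_range(NULL, rbname, 0, 2 * 1024 * 1024);
}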
2003
Juan Quintela6f37bb82017-03-13 19:26:29 +01002004static int ram_save_init_globals(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02002005{
Juan Quintela56e93d22015-05-07 19:33:31 +02002006 int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
2007
Juan Quintela8d820d62017-03-13 19:35:50 +01002008 rs->dirty_rate_high_cnt = 0;
Juan Quintela5a987732017-03-13 19:39:02 +01002009 rs->bitmap_sync_count = 0;
Juan Quintelaf664da82017-03-13 19:44:57 +01002010 migration_bitmap_sync_init(rs);
Li Zhijiandd631692015-07-02 20:18:06 +08002011 qemu_mutex_init(&migration_bitmap_mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002012
2013 if (migrate_use_xbzrle()) {
2014 XBZRLE_cache_lock();
Vijaya Kumar Kadb65de2016-10-24 16:26:49 +01002015 ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
Juan Quintela56e93d22015-05-07 19:33:31 +02002016 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
2017 TARGET_PAGE_SIZE,
2018 TARGET_PAGE_SIZE);
2019 if (!XBZRLE.cache) {
2020 XBZRLE_cache_unlock();
2021 error_report("Error creating cache");
2022 return -1;
2023 }
2024 XBZRLE_cache_unlock();
2025
2026 /* We prefer not to abort if there is no memory */
2027 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
2028 if (!XBZRLE.encoded_buf) {
2029 error_report("Error allocating encoded_buf");
2030 return -1;
2031 }
2032
2033 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
2034 if (!XBZRLE.current_buf) {
2035 error_report("Error allocating current_buf");
2036 g_free(XBZRLE.encoded_buf);
2037 XBZRLE.encoded_buf = NULL;
2038 return -1;
2039 }
2040
2041 acct_clear();
2042 }
2043
Paolo Bonzini49877832016-02-15 19:57:57 +01002044 /* For memory_global_dirty_log_start below. */
2045 qemu_mutex_lock_iothread();
2046
Juan Quintela56e93d22015-05-07 19:33:31 +02002047 qemu_mutex_lock_ramlist();
2048 rcu_read_lock();
2049 bytes_transferred = 0;
Juan Quintela6f37bb82017-03-13 19:26:29 +01002050 ram_state_reset(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002051
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00002052 migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05302053 /* Skip setting bitmap if there is no RAM */
2054 if (ram_bytes_total()) {
2055 ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2056 migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
2057 bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);
Juan Quintela56e93d22015-05-07 19:33:31 +02002058
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05302059 if (migrate_postcopy_ram()) {
2060 migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
2061 bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
2062 }
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00002063 }
2064
Juan Quintela56e93d22015-05-07 19:33:31 +02002065 /*
2066 * Count the total number of pages used by ram blocks not including any
2067 * gaps due to alignment or unplugs.
2068 */
2069 migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
2070
2071 memory_global_dirty_log_start();
Juan Quintela8d820d62017-03-13 19:35:50 +01002072 migration_bitmap_sync(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002073 qemu_mutex_unlock_ramlist();
Paolo Bonzini49877832016-02-15 19:57:57 +01002074 qemu_mutex_unlock_iothread();
zhanghailianga91246c2016-10-27 14:42:59 +08002075 rcu_read_unlock();
2076
2077 return 0;
2078}
2079
Juan Quintela3d0684b2017-03-23 15:06:39 +01002080/*
2081 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
zhanghailianga91246c2016-10-27 14:42:59 +08002082 * a long-running RCU critical section. When RCU reclaims in the code
2083 * start to become numerous it will be necessary to reduce the
2084 * granularity of these critical sections.
2085 */
2086
Juan Quintela3d0684b2017-03-23 15:06:39 +01002087/**
2088 * ram_save_setup: Setup RAM for migration
2089 *
2090 * Returns zero to indicate success and negative for error
2091 *
2092 * @f: QEMUFile where to send the data
2093 * @opaque: RAMState pointer
2094 */
zhanghailianga91246c2016-10-27 14:42:59 +08002095static int ram_save_setup(QEMUFile *f, void *opaque)
2096{
Juan Quintela6f37bb82017-03-13 19:26:29 +01002097 RAMState *rs = opaque;
zhanghailianga91246c2016-10-27 14:42:59 +08002098 RAMBlock *block;
2099
2100 /* migration has already setup the bitmap, reuse it. */
2101 if (!migration_in_colo_state()) {
Juan Quintela6f37bb82017-03-13 19:26:29 +01002102 if (ram_save_init_globals(rs) < 0) {
zhanghailianga91246c2016-10-27 14:42:59 +08002103 return -1;
2104 }
2105 }
2106
2107 rcu_read_lock();
Juan Quintela56e93d22015-05-07 19:33:31 +02002108
2109 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
2110
2111 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2112 qemu_put_byte(f, strlen(block->idstr));
2113 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2114 qemu_put_be64(f, block->used_length);
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00002115 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
2116 qemu_put_be64(f, block->page_size);
2117 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002118 }
2119
2120 rcu_read_unlock();
2121
2122 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2123 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2124
2125 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2126
2127 return 0;
2128}
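
/*
 * Editorial sketch of the setup stream produced above (layout read from
 * the code, field widths per the qemu_put_* calls):
 *
 *   be64 : ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
 *   per RAMBlock:
 *     u8    : strlen(idstr)
 *     bytes : idstr (not NUL-terminated on the wire)
 *     be64  : used_length
 *     be64  : page_size  (only if postcopy and page_size != host page size)
 *   be64 : RAM_SAVE_FLAG_EOS
 */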
2129
Juan Quintela3d0684b2017-03-23 15:06:39 +01002130/**
2131 * ram_save_iterate: iterative stage for migration
2132 *
2133 * Returns zero to indicate success and negative for error
2134 *
2135 * @f: QEMUFile where to send the data
2136 * @opaque: RAMState pointer
2137 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002138static int ram_save_iterate(QEMUFile *f, void *opaque)
2139{
Juan Quintela6f37bb82017-03-13 19:26:29 +01002140 RAMState *rs = opaque;
Juan Quintela56e93d22015-05-07 19:33:31 +02002141 int ret;
2142 int i;
2143 int64_t t0;
Thomas Huth5c903082016-11-04 14:10:17 +01002144 int done = 0;
Juan Quintela56e93d22015-05-07 19:33:31 +02002145
2146 rcu_read_lock();
Juan Quintela6f37bb82017-03-13 19:26:29 +01002147 if (ram_list.version != rs->last_version) {
2148 ram_state_reset(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002149 }
2150
2151 /* Read version before ram_list.blocks */
2152 smp_rmb();
2153
2154 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2155
2156 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2157 i = 0;
2158 while ((ret = qemu_file_rate_limit(f)) == 0) {
2159 int pages;
2160
Juan Quintela6f37bb82017-03-13 19:26:29 +01002161 pages = ram_find_and_save_block(rs, f, false, &bytes_transferred);
Juan Quintela56e93d22015-05-07 19:33:31 +02002162 /* no more pages to send */
2163 if (pages == 0) {
Thomas Huth5c903082016-11-04 14:10:17 +01002164 done = 1;
Juan Quintela56e93d22015-05-07 19:33:31 +02002165 break;
2166 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002167 acct_info.iterations++;
Jason J. Herne070afca2015-09-08 13:12:35 -04002168
Juan Quintela56e93d22015-05-07 19:33:31 +02002169 /* we want to check in the 1st loop, just in case it was the 1st time
2170 and we had to sync the dirty bitmap.
 2171 qemu_clock_get_ns() is a bit expensive, so we only check once every
 2172 few iterations
2173 */
2174 if ((i & 63) == 0) {
2175 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2176 if (t1 > MAX_WAIT) {
Juan Quintela55c44462017-01-23 22:32:05 +01002177 trace_ram_save_iterate_big_wait(t1, i);
Juan Quintela56e93d22015-05-07 19:33:31 +02002178 break;
2179 }
2180 }
2181 i++;
2182 }
2183 flush_compressed_data(f);
2184 rcu_read_unlock();
2185
2186 /*
2187 * Must occur before EOS (or any QEMUFile operation)
2188 * because of RDMA protocol.
2189 */
2190 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2191
2192 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2193 bytes_transferred += 8;
2194
2195 ret = qemu_file_get_error(f);
2196 if (ret < 0) {
2197 return ret;
2198 }
2199
Thomas Huth5c903082016-11-04 14:10:17 +01002200 return done;
Juan Quintela56e93d22015-05-07 19:33:31 +02002201}
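
/*
 * Editorial sketch (hypothetical helper): the "(i & 63) == 0" test above
 * amortizes the clock read over 64 iterations. The same pattern in
 * isolation:
 */
static void example_bounded_work(int64_t budget_ms, int max_units)
{
    int64_t t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    int i;

    for (i = 0; i < max_units; i++) {
        /* ... one unit of work ... */
        if ((i & 63) == 0 &&
            (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000
                > budget_ms) {
            break;          /* over budget: return to the main loop */
        }
    }
}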
2202
Juan Quintela3d0684b2017-03-23 15:06:39 +01002203/**
2204 * ram_save_complete: function called to send the remaining amount of ram
2205 *
2206 * Returns zero to indicate success
2207 *
2208 * Called with iothread lock
2209 *
2210 * @f: QEMUFile where to send the data
2211 * @opaque: RAMState pointer
2212 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002213static int ram_save_complete(QEMUFile *f, void *opaque)
2214{
Juan Quintela6f37bb82017-03-13 19:26:29 +01002215 RAMState *rs = opaque;
2216
Juan Quintela56e93d22015-05-07 19:33:31 +02002217 rcu_read_lock();
2218
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002219 if (!migration_in_postcopy(migrate_get_current())) {
Juan Quintela8d820d62017-03-13 19:35:50 +01002220 migration_bitmap_sync(rs);
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002221 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002222
2223 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2224
2225 /* try transferring iterative blocks of memory */
2226
2227 /* flush all remaining blocks regardless of rate limiting */
2228 while (true) {
2229 int pages;
2230
Juan Quintela6f37bb82017-03-13 19:26:29 +01002231 pages = ram_find_and_save_block(rs, f, !migration_in_colo_state(),
zhanghailianga91246c2016-10-27 14:42:59 +08002232 &bytes_transferred);
Juan Quintela56e93d22015-05-07 19:33:31 +02002233 /* no more blocks to send */
2234 if (pages == 0) {
2235 break;
2236 }
2237 }
2238
2239 flush_compressed_data(f);
2240 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
Juan Quintela56e93d22015-05-07 19:33:31 +02002241
2242 rcu_read_unlock();
Paolo Bonzinid09a6fd2015-07-09 08:47:58 +02002243
Juan Quintela56e93d22015-05-07 19:33:31 +02002244 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2245
2246 return 0;
2247}
2248
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002249static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2250 uint64_t *non_postcopiable_pending,
2251 uint64_t *postcopiable_pending)
Juan Quintela56e93d22015-05-07 19:33:31 +02002252{
Juan Quintela8d820d62017-03-13 19:35:50 +01002253 RAMState *rs = opaque;
Juan Quintela56e93d22015-05-07 19:33:31 +02002254 uint64_t remaining_size;
2255
2256 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2257
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002258 if (!migration_in_postcopy(migrate_get_current()) &&
2259 remaining_size < max_size) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002260 qemu_mutex_lock_iothread();
2261 rcu_read_lock();
Juan Quintela8d820d62017-03-13 19:35:50 +01002262 migration_bitmap_sync(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002263 rcu_read_unlock();
2264 qemu_mutex_unlock_iothread();
2265 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2266 }
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002267
2268 /* We can do postcopy, and all the data is postcopiable */
2269 *postcopiable_pending += remaining_size;
Juan Quintela56e93d22015-05-07 19:33:31 +02002270}
2271
2272static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2273{
2274 unsigned int xh_len;
2275 int xh_flags;
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002276 uint8_t *loaded_data;
Juan Quintela56e93d22015-05-07 19:33:31 +02002277
2278 if (!xbzrle_decoded_buf) {
2279 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2280 }
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002281 loaded_data = xbzrle_decoded_buf;
Juan Quintela56e93d22015-05-07 19:33:31 +02002282
2283 /* extract RLE header */
2284 xh_flags = qemu_get_byte(f);
2285 xh_len = qemu_get_be16(f);
2286
2287 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2288 error_report("Failed to load XBZRLE page - wrong compression!");
2289 return -1;
2290 }
2291
2292 if (xh_len > TARGET_PAGE_SIZE) {
2293 error_report("Failed to load XBZRLE page - len overflow!");
2294 return -1;
2295 }
2296 /* load data and decode */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002297 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002298
2299 /* decode RLE */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002300 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
Juan Quintela56e93d22015-05-07 19:33:31 +02002301 TARGET_PAGE_SIZE) == -1) {
2302 error_report("Failed to load XBZRLE page - decode error!");
2303 return -1;
2304 }
2305
2306 return 0;
2307}
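
/*
 * Editorial sketch of the XBZRLE record parsed above:
 *
 *   u8   : xh_flags   (must equal ENCODING_FLAG_XBZRLE)
 *   be16 : xh_len     (encoded length, must be <= TARGET_PAGE_SIZE)
 *   bytes: xh_len bytes of delta, decoded against the old page
 *          contents already present in 'host'
 */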
2308
Juan Quintela3d0684b2017-03-23 15:06:39 +01002309/**
2310 * ram_block_from_stream: read a RAMBlock id from the migration stream
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002311 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002312 * Must be called from within a rcu critical section.
2313 *
2314 * Returns a pointer from within the RCU-protected ram_list.
2315 *
2316 * @f: QEMUFile where to read the data from
2317 * @flags: Page flags (mostly to see if it's a continuation of previous block)
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002318 */
Juan Quintela3d0684b2017-03-23 15:06:39 +01002319static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
Juan Quintela56e93d22015-05-07 19:33:31 +02002320{
2321 static RAMBlock *block = NULL;
2322 char id[256];
2323 uint8_t len;
2324
2325 if (flags & RAM_SAVE_FLAG_CONTINUE) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08002326 if (!block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002327 error_report("Ack, bad migration stream!");
2328 return NULL;
2329 }
zhanghailiang4c4bad42016-01-15 11:37:41 +08002330 return block;
Juan Quintela56e93d22015-05-07 19:33:31 +02002331 }
2332
2333 len = qemu_get_byte(f);
2334 qemu_get_buffer(f, (uint8_t *)id, len);
2335 id[len] = 0;
2336
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002337 block = qemu_ram_block_by_name(id);
zhanghailiang4c4bad42016-01-15 11:37:41 +08002338 if (!block) {
2339 error_report("Can't find block %s", id);
2340 return NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002341 }
2342
zhanghailiang4c4bad42016-01-15 11:37:41 +08002343 return block;
2344}
2345
2346static inline void *host_from_ram_block_offset(RAMBlock *block,
2347 ram_addr_t offset)
2348{
2349 if (!offset_in_ramblock(block, offset)) {
2350 return NULL;
2351 }
2352
2353 return block->host + offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02002354}
2355
Juan Quintela3d0684b2017-03-23 15:06:39 +01002356/**
2357 * ram_handle_compressed: handle the zero page case
2358 *
Juan Quintela56e93d22015-05-07 19:33:31 +02002359 * If a page (or a whole RDMA chunk) has been
2360 * determined to be zero, then zap it.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002361 *
2362 * @host: host address for the zero page
2363 * @ch: what the page is filled from. We only support zero
2364 * @size: size of the zero page
Juan Quintela56e93d22015-05-07 19:33:31 +02002365 */
2366void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2367{
2368 if (ch != 0 || !is_zero_range(host, size)) {
2369 memset(host, ch, size);
2370 }
2371}
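
/*
 * Editorial usage sketch for the helper above: 'ch' is the single fill
 * byte that stood in for a whole page on the wire.
 *
 *   ram_handle_compressed(host, 0,    TARGET_PAGE_SIZE);  // no-op if the
 *                                                         // page is zero
 *   ram_handle_compressed(host, 0xff, TARGET_PAGE_SIZE);  // always memsets
 */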
2372
2373static void *do_data_decompress(void *opaque)
2374{
2375 DecompressParam *param = opaque;
2376 unsigned long pagesize;
Liang Li33d151f2016-05-05 15:32:58 +08002377 uint8_t *des;
2378 int len;
Juan Quintela56e93d22015-05-07 19:33:31 +02002379
Liang Li33d151f2016-05-05 15:32:58 +08002380 qemu_mutex_lock(&param->mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002381 while (!param->quit) {
Liang Li33d151f2016-05-05 15:32:58 +08002382 if (param->des) {
2383 des = param->des;
2384 len = param->len;
2385 param->des = 0;
2386 qemu_mutex_unlock(&param->mutex);
2387
Liang Li73a89122016-05-05 15:32:51 +08002388 pagesize = TARGET_PAGE_SIZE;
 2389 /* uncompress() can fail in some cases, especially
 2390 * when the page is dirtied while being compressed; that's
 2391 * not a problem because the dirty page will be retransmitted
 2392 * and uncompress() won't corrupt the data in other pages.
2393 */
Liang Li33d151f2016-05-05 15:32:58 +08002394 uncompress((Bytef *)des, &pagesize,
2395 (const Bytef *)param->compbuf, len);
Liang Li73a89122016-05-05 15:32:51 +08002396
Liang Li33d151f2016-05-05 15:32:58 +08002397 qemu_mutex_lock(&decomp_done_lock);
2398 param->done = true;
2399 qemu_cond_signal(&decomp_done_cond);
2400 qemu_mutex_unlock(&decomp_done_lock);
2401
2402 qemu_mutex_lock(&param->mutex);
2403 } else {
2404 qemu_cond_wait(&param->cond, &param->mutex);
2405 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002406 }
Liang Li33d151f2016-05-05 15:32:58 +08002407 qemu_mutex_unlock(&param->mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002408
2409 return NULL;
2410}
2411
Liang Li5533b2e2016-05-05 15:32:52 +08002412static void wait_for_decompress_done(void)
2413{
2414 int idx, thread_count;
2415
2416 if (!migrate_use_compression()) {
2417 return;
2418 }
2419
2420 thread_count = migrate_decompress_threads();
2421 qemu_mutex_lock(&decomp_done_lock);
2422 for (idx = 0; idx < thread_count; idx++) {
2423 while (!decomp_param[idx].done) {
2424 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2425 }
2426 }
2427 qemu_mutex_unlock(&decomp_done_lock);
2428}
2429
Juan Quintela56e93d22015-05-07 19:33:31 +02002430void migrate_decompress_threads_create(void)
2431{
2432 int i, thread_count;
2433
2434 thread_count = migrate_decompress_threads();
2435 decompress_threads = g_new0(QemuThread, thread_count);
2436 decomp_param = g_new0(DecompressParam, thread_count);
Liang Li73a89122016-05-05 15:32:51 +08002437 qemu_mutex_init(&decomp_done_lock);
2438 qemu_cond_init(&decomp_done_cond);
Juan Quintela56e93d22015-05-07 19:33:31 +02002439 for (i = 0; i < thread_count; i++) {
2440 qemu_mutex_init(&decomp_param[i].mutex);
2441 qemu_cond_init(&decomp_param[i].cond);
2442 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
Liang Li73a89122016-05-05 15:32:51 +08002443 decomp_param[i].done = true;
Liang Li90e56fb2016-05-05 15:32:56 +08002444 decomp_param[i].quit = false;
Juan Quintela56e93d22015-05-07 19:33:31 +02002445 qemu_thread_create(decompress_threads + i, "decompress",
2446 do_data_decompress, decomp_param + i,
2447 QEMU_THREAD_JOINABLE);
2448 }
2449}
2450
2451void migrate_decompress_threads_join(void)
2452{
2453 int i, thread_count;
2454
Juan Quintela56e93d22015-05-07 19:33:31 +02002455 thread_count = migrate_decompress_threads();
2456 for (i = 0; i < thread_count; i++) {
2457 qemu_mutex_lock(&decomp_param[i].mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002458 decomp_param[i].quit = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02002459 qemu_cond_signal(&decomp_param[i].cond);
2460 qemu_mutex_unlock(&decomp_param[i].mutex);
2461 }
2462 for (i = 0; i < thread_count; i++) {
2463 qemu_thread_join(decompress_threads + i);
2464 qemu_mutex_destroy(&decomp_param[i].mutex);
2465 qemu_cond_destroy(&decomp_param[i].cond);
2466 g_free(decomp_param[i].compbuf);
2467 }
2468 g_free(decompress_threads);
2469 g_free(decomp_param);
Juan Quintela56e93d22015-05-07 19:33:31 +02002470 decompress_threads = NULL;
2471 decomp_param = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002472}
2473
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00002474static void decompress_data_with_multi_threads(QEMUFile *f,
Juan Quintela56e93d22015-05-07 19:33:31 +02002475 void *host, int len)
2476{
2477 int idx, thread_count;
2478
2479 thread_count = migrate_decompress_threads();
Liang Li73a89122016-05-05 15:32:51 +08002480 qemu_mutex_lock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002481 while (true) {
2482 for (idx = 0; idx < thread_count; idx++) {
Liang Li73a89122016-05-05 15:32:51 +08002483 if (decomp_param[idx].done) {
Liang Li33d151f2016-05-05 15:32:58 +08002484 decomp_param[idx].done = false;
2485 qemu_mutex_lock(&decomp_param[idx].mutex);
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00002486 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002487 decomp_param[idx].des = host;
2488 decomp_param[idx].len = len;
Liang Li33d151f2016-05-05 15:32:58 +08002489 qemu_cond_signal(&decomp_param[idx].cond);
2490 qemu_mutex_unlock(&decomp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002491 break;
2492 }
2493 }
2494 if (idx < thread_count) {
2495 break;
Liang Li73a89122016-05-05 15:32:51 +08002496 } else {
2497 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002498 }
2499 }
Liang Li73a89122016-05-05 15:32:51 +08002500 qemu_mutex_unlock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002501}
2502
Juan Quintela3d0684b2017-03-23 15:06:39 +01002503/**
2504 * ram_postcopy_incoming_init: allocate postcopy data structures
2505 *
2506 * Returns 0 for success and negative if there was one error
2507 *
2508 * @mis: current migration incoming state
2509 *
2510 * Allocate data structures etc needed by incoming migration with
 2511 * postcopy-ram. postcopy-ram's similarly named
2512 * postcopy_ram_incoming_init does the work.
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00002513 */
2514int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2515{
2516 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2517
2518 return postcopy_ram_incoming_init(mis, ram_pages);
2519}
2520
Juan Quintela3d0684b2017-03-23 15:06:39 +01002521/**
2522 * ram_load_postcopy: load a page in postcopy case
2523 *
2524 * Returns 0 for success or -errno in case of error
2525 *
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002526 * Called in postcopy mode by ram_load().
2527 * rcu_read_lock is taken prior to this being called.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002528 *
2529 * @f: QEMUFile where to send the data
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002530 */
2531static int ram_load_postcopy(QEMUFile *f)
2532{
2533 int flags = 0, ret = 0;
2534 bool place_needed = false;
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002535 bool matching_page_sizes = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002536 MigrationIncomingState *mis = migration_incoming_get_current();
2537 /* Temporary page that is later 'placed' */
2538 void *postcopy_host_page = postcopy_get_tmp_page(mis);
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002539 void *last_host = NULL;
Dr. David Alan Gilberta3b6ff62015-11-11 14:02:28 +00002540 bool all_zero = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002541
2542 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
2543 ram_addr_t addr;
2544 void *host = NULL;
2545 void *page_buffer = NULL;
2546 void *place_source = NULL;
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002547 RAMBlock *block = NULL;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002548 uint8_t ch;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002549
2550 addr = qemu_get_be64(f);
2551 flags = addr & ~TARGET_PAGE_MASK;
2552 addr &= TARGET_PAGE_MASK;
2553
2554 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
2555 place_needed = false;
2556 if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002557 block = ram_block_from_stream(f, flags);
zhanghailiang4c4bad42016-01-15 11:37:41 +08002558
2559 host = host_from_ram_block_offset(block, addr);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002560 if (!host) {
2561 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
2562 ret = -EINVAL;
2563 break;
2564 }
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002565 matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002566 /*
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002567 * Postcopy requires that we place whole host pages atomically;
2568 * these may be huge pages for RAMBlocks that are backed by
2569 * hugetlbfs.
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002570 * To make it atomic, the data is read into a temporary page
2571 * that's moved into place later.
2572 * The migration protocol uses, possibly smaller, target-pages
2573 * however the source ensures it always sends all the components
2574 * of a host page in order.
2575 */
2576 page_buffer = postcopy_host_page +
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002577 ((uintptr_t)host & (block->page_size - 1));
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002578 /* If all TP are zero then we can optimise the place */
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002579 if (!((uintptr_t)host & (block->page_size - 1))) {
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002580 all_zero = true;
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002581 } else {
2582 /* not the 1st TP within the HP */
2583 if (host != (last_host + TARGET_PAGE_SIZE)) {
Markus Armbruster9af9e0f2015-12-18 16:35:19 +01002584 error_report("Non-sequential target page %p/%p",
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002585 host, last_host);
2586 ret = -EINVAL;
2587 break;
2588 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002589 }
2590
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002591
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002592 /*
2593 * If it's the last part of a host page then we place the host
2594 * page
2595 */
2596 place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002597 (block->page_size - 1)) == 0;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002598 place_source = postcopy_host_page;
2599 }
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002600 last_host = host;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002601
2602 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
2603 case RAM_SAVE_FLAG_COMPRESS:
2604 ch = qemu_get_byte(f);
2605 memset(page_buffer, ch, TARGET_PAGE_SIZE);
2606 if (ch) {
2607 all_zero = false;
2608 }
2609 break;
2610
2611 case RAM_SAVE_FLAG_PAGE:
2612 all_zero = false;
2613 if (!place_needed || !matching_page_sizes) {
2614 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
2615 } else {
 2616 /* Avoid the qemu_file copy during postcopy, which would
 2617 * otherwise copy again later; this only works when we
 2618 * do this read in one go (matching page sizes)
2619 */
2620 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
2621 TARGET_PAGE_SIZE);
2622 }
2623 break;
2624 case RAM_SAVE_FLAG_EOS:
2625 /* normal exit */
2626 break;
2627 default:
2628 error_report("Unknown combination of migration flags: %#x"
2629 " (postcopy mode)", flags);
2630 ret = -EINVAL;
2631 }
2632
2633 if (place_needed) {
2634 /* This gets called at the last target page in the host page */
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002635 void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;
2636
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002637 if (all_zero) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002638 ret = postcopy_place_page_zero(mis, place_dest,
2639 block->page_size);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002640 } else {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00002641 ret = postcopy_place_page(mis, place_dest,
2642 place_source, block->page_size);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002643 }
2644 }
2645 if (!ret) {
2646 ret = qemu_file_get_error(f);
2647 }
2648 }
2649
2650 return ret;
2651}
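
/*
 * Editorial sketch (worked numbers, assuming a 2MB hugepage block and 4KB
 * target pages): the loop above gathers 512 target pages into
 * postcopy_host_page and places them in one go on the last page, where
 *
 *   host       = hp_base + 2MB - 4KB      (last target page in the HP)
 *   place_dest = host + 4KB - 2MB = hp_base
 */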
2652
Juan Quintela56e93d22015-05-07 19:33:31 +02002653static int ram_load(QEMUFile *f, void *opaque, int version_id)
2654{
2655 int flags = 0, ret = 0;
2656 static uint64_t seq_iter;
2657 int len = 0;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002658 /*
 2659 * If the system is running in postcopy mode, page inserts to host memory must
2660 * be atomic
2661 */
2662 bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00002663 /* ADVISE is earlier, it shows the source has the postcopy capability on */
2664 bool postcopy_advised = postcopy_state_get() >= POSTCOPY_INCOMING_ADVISE;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * If RCU reclamation in this code path becomes frequent, it will be
     * necessary to reduce the granularity of this critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

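    /*
     * Every record in the precopy stream below starts with a be64 header:
     * a page-aligned RAM offset in the high bits, with the RAM_SAVE_FLAG_*
     * bits packed into the low (sub-page) bits, exactly as unpacked at the
     * top of the loop.
     */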
    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zu != %" PRIu64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

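        /*
         * Despite the name, RAM_SAVE_FLAG_COMPRESS denotes a fill-byte
         * page (in practice almost always all-zero): a single repeated
         * byte value stands in for the whole target page, and
         * ram_handle_compressed() only memsets the destination when the
         * page is not already in that state.
         */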
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

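        /*
         * A compressed page: the length check below guards against a
         * malformed stream claiming more than zlib's worst-case compressed
         * size for one target page (compressBound), and the actual
         * decompression is handed off to the decompress worker threads.
         */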
        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

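        /*
         * An XBZRLE page arrives as a delta against the previous version
         * of the same page, so load_xbzrle() patches the existing contents
         * at 'host' in place rather than overwriting the full page.
         */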
        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);
    return ret;
}

static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cleanup = ram_migration_cleanup,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}