/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include <zlib.h>
#include "qapi-event.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "exec/address-spaces.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"

#ifdef DEBUG_MIGRATION_RAM
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

static int dirty_rate_high_cnt;

static uint64_t bitmap_sync_count;

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 for the next flag */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

static const uint8_t ZERO_TARGET_PAGE[TARGET_PAGE_SIZE];

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_find_nonzero_offset(p, size) == size;
}

/* This struct contains the XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/*
 * called from qmp_migrate_set_cache_size in main thread, possibly while
 * a migration is in progress.
 * A running migration may be using the cache and might finish during this
 * call, hence changes to the cache are protected by XBZRLE.lock.
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
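
/*
 * Illustration (not part of the API contract): the cache size is rounded
 * down to a power of two, so with 4 KiB target pages a request such as
 *
 *     xbzrle_cache_resize(5 * 1024 * 1024);
 *
 * creates a cache of pow2floor(5 MiB) = 4 MiB and returns 4 MiB, while a
 * request below TARGET_PAGE_SIZE fails with -1.
 */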

/* accounting for migration statistics */
typedef struct AccountingInfo {
    uint64_t dup_pages;
    uint64_t skipped_pages;
    uint64_t norm_pages;
    uint64_t iterations;
    uint64_t xbzrle_bytes;
    uint64_t xbzrle_pages;
    uint64_t xbzrle_cache_miss;
    double xbzrle_cache_miss_rate;
    uint64_t xbzrle_overflows;
} AccountingInfo;

static AccountingInfo acct_info;

static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}

uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}

/* This is the last block that we have visited searching for dirty pages
 */
static RAMBlock *last_seen_block;
/* This is the last block from where we have sent data */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
static QemuMutex migration_bitmap_mutex;
static uint64_t migration_dirty_pages;
static uint32_t last_version;
static bool ram_bulk_stage;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock *block;
    /* Current offset to search from */
    ram_addr_t offset;
    /* Set once we wrap around */
    bool complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

static struct BitmapRcu {
    struct rcu_head rcu;
    /* Main migration bitmap */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once;
     * only maintained and used in postcopy at the moment,
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
} *migration_bitmap_rcu;

struct CompressParam {
    bool start;
    bool done;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool start;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex *comp_done_lock;
static QemuCond *comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static bool compression_switch;
static bool quit_comp_thread;
static bool quit_decomp_thread;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;

static int do_compress_ram_page(CompressParam *param);

static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;

    while (!quit_comp_thread) {
        qemu_mutex_lock(&param->mutex);
        /* Re-check quit_comp_thread in case terminate_compression_threads
         * was called just before qemu_mutex_lock(&param->mutex) and after
         * while (!quit_comp_thread); re-checking it here makes sure the
         * compression thread terminates as expected.
         */
        while (!param->start && !quit_comp_thread) {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
        if (!quit_comp_thread) {
            do_compress_ram_page(param);
        }
        param->start = false;
        qemu_mutex_unlock(&param->mutex);

        qemu_mutex_lock(comp_done_lock);
        param->done = true;
        qemu_cond_signal(comp_done_cond);
        qemu_mutex_unlock(comp_done_lock);
    }

    return NULL;
}

static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();
    quit_comp_thread = true;
    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(comp_done_lock);
    qemu_cond_destroy(comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    g_free(comp_done_cond);
    g_free(comp_done_lock);
    compress_threads = NULL;
    comp_param = NULL;
    comp_done_cond = NULL;
    comp_done_lock = NULL;
}

void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    quit_comp_thread = false;
    compression_switch = true;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    comp_done_cond = g_new0(QemuCond, 1);
    comp_done_lock = g_new0(QemuMutex, 1);
    qemu_cond_init(comp_done_cond);
    qemu_mutex_init(comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}

/**
 * save_page_header: Write page header to wire
 *
 * If this is the 1st block, it also writes the block identification
 *
 * Returns: Number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;

    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
    }
    return size;
}
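
/*
 * Illustration of the resulting wire layout (derived from the code above,
 * not a normative protocol description):
 *
 *   +--------------------------------------------+
 *   | be64: offset | RAM_SAVE_FLAG_* (low bits)  |  8 bytes, always
 *   +--------------------------------------------+
 *   | u8:    strlen(block->idstr)                |  only when the
 *   | bytes: block->idstr (not NUL-terminated)   |  CONTINUE flag is
 *   +--------------------------------------------+  absent
 *
 * Subsequent pages of the same block set RAM_SAVE_FLAG_CONTINUE and skip
 * the block name, which keeps the per-page overhead at 8 bytes.
 */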

/* Reduce amount of guest cpu execution to hopefully slow down memory writes.
 * If guest dirty memory rate is reduced below the rate at which we can
 * transfer pages to the destination then we should be able to complete
 * migration. Some workloads dirty memory way too fast and will not effectively
 * converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INITIAL];
    uint64_t pct_increment =
            s->parameters[MIGRATION_PARAMETER_X_CPU_THROTTLE_INCREMENT];

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
    }
}
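
/*
 * Worked example, assuming the upstream defaults of this era (initial 20%,
 * increment 10%; check the parameter defaults in migration.c for the
 * authoritative values): the first call throttles the guest to 20%, and
 * each further call while the dirty rate stays high raises it to 30%,
 * 40%, and so on.
 */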

/* Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @f: QEMUFile where to send the data
 * @current_data: pointer to the page's address; may be updated to point
 *                at the cached copy of the page
 * @current_addr: address of the page in the global ram_addr_t space
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}
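
/*
 * Illustration of an XBZRLE page record as emitted above (derived from the
 * code, not a normative description):
 *
 *   page header with RAM_SAVE_FLAG_XBZRLE (see save_page_header)
 *   u8:    ENCODING_FLAG_XBZRLE                       1 byte
 *   be16:  encoded_len                                2 bytes
 *   bytes: XBZRLE delta against the cached copy       encoded_len bytes
 *
 * which is why bytes_xbzrle grows by encoded_len + 1 + 2 on top of the
 * header size.
 */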

/* Called with rcu_read_lock() to protect migration_bitmap
 * rb: The RAMBlock to search for dirty pages in
 * start: Start address (typically so we can continue from previous page)
 * ram_addr_abs: Pointer into which to store the address of the dirty page
 *               within the global ram_addr space
 *
 * Returns: byte offset within memory region of the start of a dirty page
 */
static inline
ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
                                       ram_addr_t start,
                                       ram_addr_t *ram_addr_abs)
{
    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t rb_size = rb->used_length;
    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
    unsigned long *bitmap;

    unsigned long next;

    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    if (ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(bitmap, size, nr);
    }

    *ram_addr_abs = next << TARGET_PAGE_BITS;
    return (next - base) << TARGET_PAGE_BITS;
}
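
/*
 * Worked example of the bit arithmetic above (hypothetical numbers): with
 * 4 KiB target pages (TARGET_PAGE_BITS == 12), a block at rb->offset
 * 0x40000000 has base == 0x40000. A search from start == 0x3000 begins at
 * bit nr == 0x40003; if find_next_bit() returns 0x40005, the caller gets
 * *ram_addr_abs == 0x40005000 (global) and a return value of 0x5000
 * (offset within the block). During the bulk stage every page after the
 * first is assumed dirty, so the scan simply advances one bit.
 */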

static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;
    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;

    ret = test_and_clear_bit(nr, bitmap);

    if (ret) {
        migration_dirty_pages--;
    }
    return ret;
}

static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    unsigned long *bitmap;
    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
}

/* Fix me: there are too many global variables used in migration process. */
static int64_t start_time;
static int64_t bytes_xfer_prev;
static int64_t num_dirty_pages_period;
static uint64_t xbzrle_cache_miss_prev;
static uint64_t iterations_prev;

static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
    xbzrle_cache_miss_prev = 0;
    iterations_prev = 0;
}

/* Called with iothread lock held, to protect ram_list.dirty_memory[] */
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    address_space_sync_dirty_bitmap(&address_space_memory);

    qemu_mutex_lock(&migration_bitmap_mutex);
    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->offset, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&migration_bitmap_mutex);

    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes exceed 50% of the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens twice, start or
               increase throttling */
            bytes_xfer_now = ram_bytes_transferred();

            if (s->dirty_pages_rate &&
                (num_dirty_pages_period * TARGET_PAGE_SIZE >
                 (bytes_xfer_now - bytes_xfer_prev) / 2) &&
                (dirty_rate_high_cnt++ >= 2)) {
                trace_migration_throttle();
                dirty_rate_high_cnt = 0;
                mig_throttle_guest_down();
            }
            bytes_xfer_prev = bytes_xfer_now;
        }

        if (migrate_use_xbzrle()) {
            if (iterations_prev != acct_info.iterations) {
                acct_info.xbzrle_cache_miss_rate =
                    (double)(acct_info.xbzrle_cache_miss -
                             xbzrle_cache_miss_prev) /
                    (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
    s->dirty_sync_count = bitmap_sync_count;
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(bitmap_sync_count, NULL);
    }
}
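
/*
 * Numeric illustration of the auto-converge trigger above (hypothetical
 * figures): if a sync period transferred 1 GiB while the guest dirtied
 * more than 512 MiB worth of pages (num_dirty_pages_period *
 * TARGET_PAGE_SIZE > transferred / 2), the period counts as a high dirty
 * rate; after the rate has stayed high for consecutive periods
 * (tracked in dirty_rate_high_cnt), mig_throttle_guest_down() is invoked
 * and the counter resets.
 */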

/**
 * save_zero_page: Send the zero page to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p, uint64_t *bytes_transferred)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        *bytes_transferred += 1;
        pages = 1;
    }

    return pages;
}
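
/*
 * Note the economy of the zero-page record: the page header with
 * RAM_SAVE_FLAG_COMPRESS plus a single fill byte of 0, so an all-zero
 * 4 KiB target page costs 9 bytes on the wire (plus the block name the
 * first time the block is seen) instead of 4104.
 */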

/**
 * ram_save_page: Send the given page to the stream
 *
 * Returns: Number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(QEMUFile *f, RAMBlock* block, ram_addr_t offset,
                         bool last_stage, uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;

    p = block->host + offset;

    /* When in doubt, send the page as a normal page */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        pages = save_zero_page(f, block, offset, p, bytes_transferred);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(current_addr);
        } else if (!ram_bulk_stage && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(f, &p, current_addr, block,
                                     offset, last_stage, bytes_transferred);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}

static int do_compress_ram_page(CompressParam *param)
{
    int bytes_sent, blen;
    uint8_t *p;
    RAMBlock *block = param->block;
    ram_addr_t offset = param->offset;

    p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(param->file, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(param->file, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    bytes_sent += blen;

    return bytes_sent;
}
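
/*
 * Sketch of the compressed-page record (assuming qemu_put_compression_data
 * behaves as in the qemu-file.c of this era, writing a be32 compressed
 * length followed by the deflate output of compress2()): a page header
 * with RAM_SAVE_FLAG_COMPRESS_PAGE, then the zlib-compressed page body.
 * Each worker writes into its own buffered param->file; the migration
 * thread later drains that buffer into the real stream with
 * qemu_put_qemu_file().
 */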

static inline void start_compression(CompressParam *param)
{
    param->done = false;
    qemu_mutex_lock(&param->mutex);
    param->start = true;
    qemu_cond_signal(&param->cond);
    qemu_mutex_unlock(&param->mutex);
}

static inline void start_decompression(DecompressParam *param)
{
    qemu_mutex_lock(&param->mutex);
    param->start = true;
    qemu_cond_signal(&param->cond);
    qemu_mutex_unlock(&param->mutex);
}

static uint64_t bytes_transferred;

static void flush_compressed_data(QEMUFile *f)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();
    for (idx = 0; idx < thread_count; idx++) {
        if (!comp_param[idx].done) {
            qemu_mutex_lock(comp_done_lock);
            while (!comp_param[idx].done && !quit_comp_thread) {
                qemu_cond_wait(comp_done_cond, comp_done_lock);
            }
            qemu_mutex_unlock(comp_done_lock);
        }
        if (!quit_comp_thread) {
            len = qemu_put_qemu_file(f, comp_param[idx].file);
            bytes_transferred += len;
        }
    }
}

static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}

static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
                                           ram_addr_t offset,
                                           uint64_t *bytes_transferred)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
                set_compress_params(&comp_param[idx], block, offset);
                start_compression(&comp_param[idx]);
                pages = 1;
                acct_info.norm_pages++;
                *bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(comp_done_cond, comp_done_lock);
        }
    }
    qemu_mutex_unlock(comp_done_lock);

    return pages;
}

/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns: Number of pages written.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_compressed_page(QEMUFile *f, RAMBlock *block,
                                    ram_addr_t offset, bool last_stage,
                                    uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    uint8_t *p;
    int ret;

    p = block->host + offset;

    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in last block should have been sent
         * out, keeping this order is important, because the 'cont' flag
         * is used to avoid resending the block name.
         */
        if (block != last_sent_block) {
            flush_compressed_data(f);
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                set_compress_params(&comp_param[0], block, offset);
                /* Use the qemu thread to compress the data to make sure the
                 * first page is sent out before other pages
                 */
                bytes_xmit = do_compress_ram_page(&comp_param[0]);
                acct_info.norm_pages++;
                qemu_put_qemu_file(f, comp_param[0].file);
                *bytes_transferred += bytes_xmit;
                pages = 1;
            }
        } else {
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(f, block, offset,
                                                        bytes_transferred);
            }
        }
    }

    return pages;
}

/*
 * Find the next dirty page and update any state associated with
 * the search process.
 *
 * Returns: True if a page is found
 *
 * @f: Current migration stream.
 * @pss: Data about the state of the current dirty page scan.
 * @*again: Set to false if the search has scanned the whole of RAM
 * *ram_addr_abs: Pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
                             bool *again, ram_addr_t *ram_addr_abs)
{
    pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
                                              ram_addr_abs);
    if (pss->complete_round && pss->block == last_seen_block &&
        pss->offset >= last_offset) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if (pss->offset >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->offset = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(f);
                compression_switch = false;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}

/*
 * Helper for 'get_queued_page' - gets a page off the queue
 *      ms:      MigrationState in
 * *offset:      Used to return the offset within the RAMBlock
 * ram_addr_abs: global offset in the dirty/sent bitmaps
 *
 * Returns:      block (or NULL if none available)
 */
static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
                              ram_addr_t *ram_addr_abs)
{
    RAMBlock *block = NULL;

    qemu_mutex_lock(&ms->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
        struct MigrationSrcPageRequest *entry =
                                QSIMPLEQ_FIRST(&ms->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;
        *ram_addr_abs = (entry->offset + entry->rb->offset) &
                        TARGET_PAGE_MASK;

        if (entry->len > TARGET_PAGE_SIZE) {
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
            g_free(entry);
        }
    }
    qemu_mutex_unlock(&ms->src_page_req_mutex);

    return block;
}

/*
 * Unqueue a page from the queue fed by postcopy page requests; skips pages
 * that are already sent (!dirty)
 *
 *           ms: MigrationState in
 *          pss: PageSearchStatus structure updated with found block/offset
 * ram_addr_abs: global offset in the dirty/sent bitmaps
 *
 * Returns:      true if a queued page is found
 */
static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
                            ram_addr_t *ram_addr_abs)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(ms, &offset, ram_addr_abs);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long *bitmap;
            bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
            dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(
                    block->idstr, (uint64_t)offset,
                    (uint64_t)*ram_addr_abs,
                    test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
                         atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
            } else {
                trace_get_queued_page(block->idstr,
                                      (uint64_t)offset,
                                      (uint64_t)*ram_addr_abs);
            }
        }

    } while (block && !dirty);

    if (block) {
        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * (in migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, and that's no longer true.
         */
        ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->offset = offset;
    }

    return !!block;
}

/**
 * flush_page_queue: Flush any remaining pages in the ram request queue
 *    it should be empty at the end anyway, but in error cases there may be
 *    some left.
 *
 * ms: MigrationState
 */
void flush_page_queue(MigrationState *ms)
{
    struct MigrationSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    rcu_read_lock();
    QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
        g_free(mspr);
    }
    rcu_read_unlock();
}

/**
 * Queue the pages for transmission, e.g. a request from postcopy destination
 *   ms: MigrationStatus in which the queue is held
 *   rbname: The RAMBlock the request is for - may be NULL (to mean reuse last)
 *   start: Offset from the start of the RAMBlock
 *   len: Length (in bytes) to send
 *   Return: 0 on success
 */
int ram_save_queue_pages(MigrationState *ms, const char *rbname,
                         ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;

    rcu_read_lock();
    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = ms->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            goto err;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            goto err;
        }
        ms->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start + len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        goto err;
    }

    struct MigrationSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct MigrationSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&ms->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
    qemu_mutex_unlock(&ms->src_page_req_mutex);
    rcu_read_unlock();

    return 0;

err:
    rcu_read_unlock();
    return -1;
}

/**
 * ram_save_target_page: Save one target page
 *
 * @ms: The current migration state
 * @f: QEMUFile where to send the data
 * @block: pointer to block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
 *
 * Returns: Number of pages written.
 */
static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
                                RAMBlock *block, ram_addr_t offset,
                                bool last_stage,
                                uint64_t *bytes_transferred,
                                ram_addr_t dirty_ram_abs)
{
    int res = 0;

    /* Check whether the page is dirty and, if it is, send it */
    if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
        unsigned long *unsentmap;
        if (compression_switch && migrate_use_compression()) {
            res = ram_save_compressed_page(f, block, offset,
                                           last_stage,
                                           bytes_transferred);
        } else {
            res = ram_save_page(f, block, offset, last_stage,
                                bytes_transferred);
        }

        if (res < 0) {
            return res;
        }
        unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
        if (unsentmap) {
            clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
        }
        /* Only update last_sent_block if a block was actually sent; xbzrle
         * might have decided the page was identical so didn't bother writing
         * to the stream.
         */
        if (res > 0) {
            last_sent_block = block;
        }
    }

    return res;
}

/**
 * ram_save_host_page: Starting at *offset send pages up to the end
 *                     of the current host page. It's valid for the initial
 *                     offset to point into the middle of a host page
 *                     in which case the remainder of the host page is sent.
 *                     Only dirty target pages are sent.
 *
 * Returns: Number of pages written.
 *
 * @ms: The current migration state
 * @f: QEMUFile where to send the data
 * @block: pointer to block that contains the page we want to send
 * @offset: offset inside the block for the page; updated to last target page
 *          sent
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
 */
static int ram_save_host_page(MigrationState *ms, QEMUFile *f, RAMBlock *block,
                              ram_addr_t *offset, bool last_stage,
                              uint64_t *bytes_transferred,
                              ram_addr_t dirty_ram_abs)
{
    int tmppages, pages = 0;
    do {
        tmppages = ram_save_target_page(ms, f, block, *offset, last_stage,
                                        bytes_transferred, dirty_ram_abs);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        *offset += TARGET_PAGE_SIZE;
        dirty_ram_abs += TARGET_PAGE_SIZE;
    } while (*offset & (qemu_host_page_size - 1));

    /* The offset we leave with is the last one we looked at */
    *offset -= TARGET_PAGE_SIZE;
    return pages;
}
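
/*
 * Illustration (hypothetical configuration): with 4 KiB target pages and a
 * 64 KiB host page size, a call that starts at target page 3 of a host
 * page walks pages 3..15 and sends whichever of them are still dirty, so
 * that the destination never ends up with a partially populated host page.
 */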

/**
 * ram_find_and_save_block: Finds a dirty page and sends it to f
 *
 * Called within an RCU critical section.
 *
 * Returns: The number of pages written
 *          0 means no dirty pages
 *
 * @f: QEMUFile where to send the data
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 *
 * On systems where host-page-size > target-page-size it will send all the
 * pages in a host page that are dirty.
 */

static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
                                   uint64_t *bytes_transferred)
{
    PageSearchStatus pss;
    MigrationState *ms = migrate_get_current();
    int pages = 0;
    bool again, found;
    ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
                                 ram_addr_t space */

    pss.block = last_seen_block;
    pss.offset = last_offset;
    pss.complete_round = false;

    if (!pss.block) {
        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    do {
        again = true;
        found = get_queued_page(ms, &pss, &dirty_ram_abs);

        if (!found) {
            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
        }

        if (found) {
            pages = ram_save_host_page(ms, f, pss.block, &pss.offset,
                                       last_stage, bytes_transferred,
                                       dirty_ram_abs);
        }
    } while (!pages && again);

    last_seen_block = pss.block;
    last_offset = pss.offset;

    return pages;
}

void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        total += block->used_length;
    }
    rcu_read_unlock();
    return total;
}

void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

static void migration_bitmap_free(struct BitmapRcu *bmap)
{
    g_free(bmap->bmap);
    g_free(bmap->unsentmap);
    g_free(bmap);
}

static void ram_migration_cleanup(void *opaque)
{
    /* The caller holds the iothread lock or is in a bottom half, so there
     * is no write race against this migration_bitmap
     */
    struct BitmapRcu *bitmap = migration_bitmap_rcu;
    atomic_rcu_set(&migration_bitmap_rcu, NULL);
    if (bitmap) {
        memory_global_dirty_log_stop();
        call_rcu(bitmap, migration_bitmap_free, rcu);
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}

static void reset_ram_globals(void)
{
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;
    last_version = ram_list.version;
    ram_bulk_stage = true;
}

#define MAX_WAIT 50 /* ms, half buffered_file limit */

void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
{
    /* Called in the qemu main thread, so there is
     * no write race against this migration_bitmap
     */
    if (migration_bitmap_rcu) {
        struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
        bitmap = g_new(struct BitmapRcu, 1);
        bitmap->bmap = bitmap_new(new);

        /* Prevent bits in the migration bitmap from being set
         * by migration_bitmap_sync_range() at the same time.
         * It is safe for migration if bits are cleared
         * concurrently.
         */
        qemu_mutex_lock(&migration_bitmap_mutex);
        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
        bitmap_set(bitmap->bmap, old, new - old);

        /* We don't have a way to safely extend the unsentmap
         * with RCU; so mark it as missing, entry to postcopy
         * will fail.
         */
        bitmap->unsentmap = NULL;

        atomic_rcu_set(&migration_bitmap_rcu, bitmap);
        qemu_mutex_unlock(&migration_bitmap_mutex);
        migration_dirty_pages += new - old;
        call_rcu(old_bitmap, migration_bitmap_free, rcu);
    }
}

/*
 * 'expected' is the value you expect the bitmap mostly to be full
 * of; it won't bother printing lines that are all this value.
 * If 'todump' is null the migration bitmap is dumped.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
{
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    if (!todump) {
        todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    }

    for (cur = 0; cur < ram_pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > ram_pages) {
            linelen = ram_pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        }
        if (found) {
            linebuf[curb] = '\0';
            fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
        }
    }
}
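
/*
 * Sample of the output format (made-up bits): lines are up to 128 pages
 * wide, keyed by the index of the first page in the line, and only lines
 * containing at least one unexpected bit are printed:
 *
 *   0x00000080 : ...1111.............1...............................
 */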

/* **** functions for postcopy ***** */

/*
 * Callback from postcopy_each_ram_send_discard for each RAMBlock
 * Note: At this point the 'unsentmap' is the processed bitmap combined
 *       with the dirtymap; so a '1' means it's either dirty or unsent.
 * start,length: Indexes into the bitmap for the first bit
 *               representing the named block and length in target-pages
 */
static int postcopy_send_discard_bm_ram(MigrationState *ms,
                                        PostcopyDiscardState *pds,
                                        unsigned long start,
                                        unsigned long length)
{
    unsigned long end = start + length; /* one after the end */
    unsigned long current;
    unsigned long *unsentmap;

    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
    for (current = start; current < end; ) {
        unsigned long one = find_next_bit(unsentmap, end, current);

        if (one < end) {
            unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
            unsigned long discard_length;

            if (zero >= end) {
                discard_length = end - one;
            } else {
                discard_length = zero - one;
            }
            postcopy_discard_send_range(ms, pds, one, discard_length);
            current = one + discard_length;
        } else {
            current = one;
        }
    }

    return 0;
}
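
/*
 * Worked example (made-up bitmap): for an unsentmap run such as
 *
 *   bit:   0 1 2 3 4 5 6
 *   value: 0 0 1 1 1 0 0
 *
 * the loop finds one == 2 and zero == 5, and sends a single discard range
 * covering pages 2..4 (length 3) rather than three per-page messages.
 */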

/*
 * Utility for the outgoing postcopy code.
 *   Calls postcopy_send_discard_bm_ram for each RAMBlock
 *   passing it bitmap indexes and name.
 * Returns: 0 on success
 * (qemu_ram_foreach_block ends up passing unscaled lengths
 *  which would mean postcopy code would have to deal with target page)
 */
static int postcopy_each_ram_send_discard(MigrationState *ms)
{
    struct RAMBlock *block;
    int ret;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;
        PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
                                                               first,
                                                               block->idstr);

        /*
         * Postcopy sends chunks of bitmap over the wire, but it
         * just needs indexes at this point, avoids it having
         * target page specific code.
         */
        ret = postcopy_send_discard_bm_ram(ms, pds, first,
                                    block->used_length >> TARGET_PAGE_BITS);
        postcopy_discard_send_finish(ms, pds);
        if (ret) {
            return ret;
        }
    }

    return 0;
}
1599
1600/*
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001601 * Helper for postcopy_chunk_hostpages; it's called twice to cleanup
1602 * the two bitmaps, that are similar, but one is inverted.
1603 *
1604 * We search for runs of target-pages that don't start or end on a
1605 * host page boundary;
1606 * unsent_pass=true: Cleans up partially unsent host pages by searching
1607 * the unsentmap
1608 * unsent_pass=false: Cleans up partially dirty host pages by searching
1609 * the main migration bitmap
1610 *
1611 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                          RAMBlock *block,
                                          PostcopyDiscardState *pds)
{
    unsigned long *bitmap;
    unsigned long *unsentmap;
    unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
    unsigned long first = block->offset >> TARGET_PAGE_BITS;
    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
    unsigned long last = first + (len - 1);
    unsigned long run_start;

    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;

    if (unsent_pass) {
        /* Find a sent page */
        run_start = find_next_zero_bit(unsentmap, last + 1, first);
    } else {
        /* Find a dirty page */
        run_start = find_next_bit(bitmap, last + 1, first);
    }

    while (run_start <= last) {
        bool do_fixup = false;
        unsigned long fixup_start_addr;
        unsigned long host_offset;

        /*
         * If the start of this run of pages is in the middle of a host
         * page, then we need to fix up this host page.
         */
        host_offset = run_start % host_ratio;
        if (host_offset) {
            do_fixup = true;
            run_start -= host_offset;
            fixup_start_addr = run_start;
            /* For the next pass */
            run_start = run_start + host_ratio;
        } else {
            /* Find the end of this run */
            unsigned long run_end;
            if (unsent_pass) {
                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
            } else {
                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
            }
            /*
             * If the end isn't at the start of a host page, then the
             * run doesn't finish at the end of a host page
             * and we need to discard.
             */
            host_offset = run_end % host_ratio;
            if (host_offset) {
                do_fixup = true;
                fixup_start_addr = run_end - host_offset;
                /*
                 * This host page has gone, the next loop iteration starts
                 * from after the fixup
                 */
                run_start = fixup_start_addr + host_ratio;
            } else {
                /*
                 * No discards on this iteration, next loop starts from
                 * next sent/dirty page
                 */
                run_start = run_end + 1;
            }
        }

        if (do_fixup) {
            unsigned long page;

            /* Tell the destination to discard this page */
            if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
                /* For the unsent_pass we:
                 *     discard partially sent pages
                 * For the !unsent_pass (dirty) we:
                 *     discard partially dirty pages that were sent
                 *     (any partially sent pages were already discarded
                 *     by the previous unsent_pass)
                 */
                postcopy_discard_send_range(ms, pds, fixup_start_addr,
                                            host_ratio);
            }

            /* Clean up the bitmap */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /* All pages in this host page are now not sent */
                set_bit(page, unsentmap);

                /*
                 * Remark them as dirty, updating the count for any pages
                 * that weren't previously dirty.
                 */
                migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        if (unsent_pass) {
            /* Find the next sent page for the next iteration */
            run_start = find_next_zero_bit(unsentmap, last + 1,
                                           run_start);
        } else {
            /* Find the next dirty page for the next iteration */
            run_start = find_next_bit(bitmap, last + 1, run_start);
        }
    }
}
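
/*
 * Ordering note (inferred from the comments above): the unsent_pass
 * must run before the dirty pass, because the dirty pass only sends a
 * discard for host pages whose first target page is already marked
 * sent, relying on the unsent_pass to have handled any partially sent
 * host page.
 */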

/*
 * Utility for the outgoing postcopy code.
 *
 * Discard any partially sent host-page size chunks, mark any partially
 * dirty host-page size chunks as all dirty.
 *
 * Returns: 0 on success
 */
static int postcopy_chunk_hostpages(MigrationState *ms)
{
    struct RAMBlock *block;

    if (qemu_host_page_size == TARGET_PAGE_SIZE) {
        /* Easy case - TPS==HPS - nothing to be done */
        return 0;
    }

    /* Easiest way to make sure we don't resume in the middle of a host-page */
    last_seen_block = NULL;
    last_sent_block = NULL;
    last_offset = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        unsigned long first = block->offset >> TARGET_PAGE_BITS;

        PostcopyDiscardState *pds =
            postcopy_discard_send_init(ms, first, block->idstr);

        /* First pass: Discard all partially sent host pages */
        postcopy_chunk_hostpages_pass(ms, true, block, pds);
        /*
         * Second pass: Ensure that all partially dirty host pages are made
         * fully dirty.
         */
        postcopy_chunk_hostpages_pass(ms, false, block, pds);

        postcopy_discard_send_finish(ms, pds);
    } /* ram_list loop */

    return 0;
}

/*
 * Transmit the set of pages to be discarded after precopy to the target
 * host; these are pages that:
 *     a) Have been previously transmitted but are now dirty again
 *     b) Pages that have never been transmitted; this ensures that
 *        any pages on the destination that have been mapped by background
 *        tasks get discarded (transparent huge pages is the specific concern)
 * Hopefully this is pretty sparse
 */
int ram_postcopy_send_discard_bitmap(MigrationState *ms)
{
    int ret;
    unsigned long *bitmap, *unsentmap;

    rcu_read_lock();

    /* This should be our last sync, the src is now paused */
    migration_bitmap_sync();

    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
    if (!unsentmap) {
        /* We don't have a safe way to resize the unsentmap, so
         * if the bitmap was resized it will be NULL at this
         * point.
         */
        error_report("migration ram resized during precopy phase");
        rcu_read_unlock();
        return -EINVAL;
    }

    /* Deal with TPS != HPS */
    ret = postcopy_chunk_hostpages(ms);
    if (ret) {
        rcu_read_unlock();
        return ret;
    }

    /*
     * Update the unsentmap to be unsentmap = unsentmap | dirty
     */
    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    bitmap_or(unsentmap, unsentmap, bitmap,
              last_ram_offset() >> TARGET_PAGE_BITS);

    trace_ram_postcopy_send_discard_bitmap();
#ifdef DEBUG_POSTCOPY
    ram_debug_dump_bitmap(unsentmap, true);
#endif

    ret = postcopy_each_ram_send_discard(ms);
    rcu_read_unlock();

    return ret;
}
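
/*
 * After the bitmap_or above, a set bit in the unsentmap means "the
 * destination must not trust its copy of this page": either the page
 * was never sent, or it was sent and has since been redirtied.  Those
 * are exactly the pages postcopy_each_ram_send_discard() tells the
 * destination to drop.
 */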

/*
 * At the start of the postcopy phase of migration, any now-dirty
 * precopied pages are discarded.
 *
 * start, length describe a byte address range within the RAMBlock
 *
 * Returns 0 on success.
 */
int ram_discard_range(MigrationIncomingState *mis,
                      const char *block_name,
                      uint64_t start, size_t length)
{
    int ret = -1;

    rcu_read_lock();
    RAMBlock *rb = qemu_ram_block_by_name(block_name);

    if (!rb) {
        error_report("ram_discard_range: Failed to find block '%s'",
                     block_name);
        goto err;
    }

    uint8_t *host_startaddr = rb->host + start;

    if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
        error_report("ram_discard_range: Unaligned start address: %p",
                     host_startaddr);
        goto err;
    }

    if ((start + length) <= rb->used_length) {
        uint8_t *host_endaddr = host_startaddr + length;
        if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
            error_report("ram_discard_range: Unaligned end address: %p",
                         host_endaddr);
            goto err;
        }
        ret = postcopy_ram_discard_range(mis, host_startaddr, length);
    } else {
        error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
                     "/%zx/" RAM_ADDR_FMT")",
                     block_name, start, length, rb->used_length);
    }

err:
    rcu_read_unlock();

    return ret;
}
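
/*
 * Hypothetical usage (names and numbers are illustrative only): a
 * destination told to discard the first 2MiB of the "pc.ram" block
 * would call
 *     ram_discard_range(mis, "pc.ram", 0, 2 * 1024 * 1024);
 * and the range is dropped via postcopy_ram_discard_range(), provided
 * both ends of the range are host-page aligned.
 */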

/* Each of ram_save_setup, ram_save_iterate and ram_save_complete has
 * a long-running RCU critical section.  When rcu-reclaims in the code
 * start to become numerous it will be necessary to reduce the
 * granularity of these critical sections.
 */

static int ram_save_setup(QEMUFile *f, void *opaque)
{
    RAMBlock *block;
    int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */

    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();
    qemu_mutex_init(&migration_bitmap_mutex);

    if (migrate_use_xbzrle()) {
        XBZRLE_cache_lock();
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    /* iothread lock needed for ram_list.dirty_memory[] */
    qemu_mutex_lock_iothread();
    qemu_mutex_lock_ramlist();
    rcu_read_lock();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
    migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);

    if (migrate_postcopy_ram()) {
        migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
        bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
    }

    /*
     * Count the total number of pages used by ram blocks not including any
     * gaps due to alignment or unplugs.
     */
    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();

    qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        qemu_put_byte(f, strlen(block->idstr));
        qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
        qemu_put_be64(f, block->used_length);
    }

    rcu_read_unlock();

    ram_control_before_iterate(f, RAM_CONTROL_SETUP);
    ram_control_after_iterate(f, RAM_CONTROL_SETUP);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}
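
/*
 * For reference, the setup section emitted above looks like this on
 * the wire (derived from the qemu_put_* calls):
 *     be64: total ram size | RAM_SAVE_FLAG_MEM_SIZE
 *     for each RAMBlock:
 *         u8   idstr length
 *         ...  idstr bytes
 *         be64 used_length
 *     be64: RAM_SAVE_FLAG_EOS
 */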

static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int pages_sent = 0;

    rcu_read_lock();
    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    /* Read version before ram_list.blocks */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(f, false, &bytes_transferred);
        /* no more pages to send */
        if (pages == 0) {
            break;
        }
        pages_sent += pages;
        acct_info.iterations++;

        /* We want to check in the 1st loop, just in case it was the 1st
           time and we had to sync the dirty bitmap.
           qemu_clock_get_ns() is a bit expensive, so we only check once
           every few iterations.
        */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }
    flush_compressed_data(f);
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation)
     * because of RDMA protocol.
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return pages_sent;
}
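
/*
 * Note on the (i & 63) == 0 test above: the clock is read on the very
 * first pass through the loop (i == 0) and then on every 64th
 * iteration, so a slow first iteration caused by the initial bitmap
 * sync is still caught without paying for a clock read per page.
 */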

/* Called with iothread lock */
static int ram_save_complete(QEMUFile *f, void *opaque)
{
    rcu_read_lock();

    if (!migration_in_postcopy(migrate_get_current())) {
        migration_bitmap_sync();
    }

    ram_control_before_iterate(f, RAM_CONTROL_FINISH);

    /* try transferring iterative blocks of memory */

    /* flush all remaining blocks regardless of rate limiting */
    while (true) {
        int pages;

        pages = ram_find_and_save_block(f, true, &bytes_transferred);
        /* no more blocks to send */
        if (pages == 0) {
            break;
        }
    }

    flush_compressed_data(f);
    ram_control_after_iterate(f, RAM_CONTROL_FINISH);

    rcu_read_unlock();

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);

    return 0;
}

static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                             uint64_t *non_postcopiable_pending,
                             uint64_t *postcopiable_pending)
{
    uint64_t remaining_size;

    remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;

    if (!migration_in_postcopy(migrate_get_current()) &&
        remaining_size < max_size) {
        qemu_mutex_lock_iothread();
        rcu_read_lock();
        migration_bitmap_sync();
        rcu_read_unlock();
        qemu_mutex_unlock_iothread();
        remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
    }

    /* We can do postcopy, and all the data is postcopiable */
    *postcopiable_pending += remaining_size;
}
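
/*
 * The extra bitmap sync in ram_save_pending() only happens once the
 * estimate drops below max_size: when we look nearly done, resync to
 * pick up pages dirtied since the last pass, so the decision to stop
 * iterating is based on fresh data rather than a stale estimate.
 */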

static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
{
    unsigned int xh_len;
    int xh_flags;
    uint8_t *loaded_data;

    if (!xbzrle_decoded_buf) {
        xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
    }
    loaded_data = xbzrle_decoded_buf;

    /* extract RLE header */
    xh_flags = qemu_get_byte(f);
    xh_len = qemu_get_be16(f);

    if (xh_flags != ENCODING_FLAG_XBZRLE) {
        error_report("Failed to load XBZRLE page - wrong compression!");
        return -1;
    }

    if (xh_len > TARGET_PAGE_SIZE) {
        error_report("Failed to load XBZRLE page - len overflow!");
        return -1;
    }
    /* load data and decode */
    qemu_get_buffer_in_place(f, &loaded_data, xh_len);

    /* decode RLE */
    if (xbzrle_decode_buffer(loaded_data, xh_len, host,
                             TARGET_PAGE_SIZE) == -1) {
        error_report("Failed to load XBZRLE page - decode error!");
        return -1;
    }

    return 0;
}
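
/*
 * Wire format consumed by load_xbzrle() above (as read by the
 * qemu_get_* calls):
 *     u8   flags    must be ENCODING_FLAG_XBZRLE
 *     be16 length   encoded length, at most TARGET_PAGE_SIZE
 *     ...  encoded XBZRLE data, decoded as a delta against the
 *          current contents of the target page at 'host'
 */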

/*
 * Read a RAMBlock ID from the stream f, find the host address of the
 * start of that block and add on 'offset'.
 *
 * Must be called from within an RCU critical section; returns a
 * pointer from within the RCU-protected ram_list.
 *
 * f: Stream to read from
 * offset: Offset within the block
 * flags: Page flags (mostly to see if it's a continuation of a previous block)
 */
static inline void *host_from_stream_offset(QEMUFile *f,
                                            ram_addr_t offset,
                                            int flags)
{
    static RAMBlock *block = NULL;
    char id[256];
    uint8_t len;

    if (flags & RAM_SAVE_FLAG_CONTINUE) {
        if (!block || block->max_length <= offset) {
            error_report("Ack, bad migration stream!");
            return NULL;
        }

        return block->host + offset;
    }

    len = qemu_get_byte(f);
    qemu_get_buffer(f, (uint8_t *)id, len);
    id[len] = 0;

    block = qemu_ram_block_by_name(id);
    if (block && block->max_length > offset) {
        return block->host + offset;
    }

    error_report("Can't find block %s", id);
    return NULL;
}

/*
 * If a page (or a whole RDMA chunk) has been
 * determined to be zero, then zap it.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    if (ch != 0 || !is_zero_range(host, size)) {
        memset(host, ch, size);
    }
}
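
/*
 * The check in ram_handle_compressed() deliberately skips the memset
 * when ch is zero and the destination range is already zero: writing
 * would needlessly dirty the page and, on a freshly started
 * destination, force the kernel to back it with real memory.
 */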

static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;

    while (!quit_decomp_thread) {
        qemu_mutex_lock(&param->mutex);
        /* Re-check the predicate after each wakeup: condition variable
         * waits may wake spuriously, so the work must not be done inside
         * the wait loop itself.
         */
        while (!param->start && !quit_decomp_thread) {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
        if (param->start && !quit_decomp_thread) {
            pagesize = TARGET_PAGE_SIZE;
            /* uncompress() can fail, especially when the page was dirtied
             * while it was being compressed; that's not a problem because
             * the dirty page will be retransmitted and uncompress() won't
             * break the data in other pages.
             */
            uncompress((Bytef *)param->des, &pagesize,
                       (const Bytef *)param->compbuf, param->len);
            param->start = false;
        }
        qemu_mutex_unlock(&param->mutex);
    }

    return NULL;
}

void migrate_decompress_threads_create(void)
{
    int i, thread_count;

    thread_count = migrate_decompress_threads();
    decompress_threads = g_new0(QemuThread, thread_count);
    decomp_param = g_new0(DecompressParam, thread_count);
    quit_decomp_thread = false;
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_init(&decomp_param[i].mutex);
        qemu_cond_init(&decomp_param[i].cond);
        decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
        qemu_thread_create(decompress_threads + i, "decompress",
                           do_data_decompress, decomp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}

void migrate_decompress_threads_join(void)
{
    int i, thread_count;

    quit_decomp_thread = true;
    thread_count = migrate_decompress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_mutex_lock(&decomp_param[i].mutex);
        qemu_cond_signal(&decomp_param[i].cond);
        qemu_mutex_unlock(&decomp_param[i].mutex);
    }
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(decompress_threads + i);
        qemu_mutex_destroy(&decomp_param[i].mutex);
        qemu_cond_destroy(&decomp_param[i].cond);
        g_free(decomp_param[i].compbuf);
    }
    g_free(decompress_threads);
    g_free(decomp_param);
    decompress_threads = NULL;
    decomp_param = NULL;
}

static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    /* Busy-wait until one of the decompression threads is idle */
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (!decomp_param[idx].start) {
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                start_decompression(&decomp_param[idx]);
                break;
            }
        }
        if (idx < thread_count) {
            break;
        }
    }
}
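
/*
 * Note: the dispatch loop above spins without blocking while every
 * decompression thread is busy, and it reads decomp_param[idx].start
 * without taking the per-thread mutex; it relies on this function
 * being the only producer for each slot.
 */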
2264
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00002265/*
2266 * Allocate data structures etc needed by incoming migration with postcopy-ram
2267 * postcopy-ram's similarly names postcopy_ram_incoming_init does the work
2268 */
2269int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2270{
2271 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2272
2273 return postcopy_ram_incoming_init(mis, ram_pages);
2274}

/*
 * Called in postcopy mode by ram_load().
 * rcu_read_lock is taken prior to this being called.
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
    MigrationIncomingState *mis = migration_incoming_get_current();
    /* Temporary page that is later 'placed' */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    void *last_host = NULL;
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            /*
             * Postcopy requires that we place whole host pages atomically.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target-pages
             * however the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & ~qemu_host_page_mask);
            /* If all target pages within the host page are zero then we
             * can optimise the placement.
             */
            if (!((uintptr_t)host & ~qemu_host_page_mask)) {
                all_zero = true;
            } else {
                /* not the 1st TP within the HP */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            ~qemu_host_page_mask) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!place_needed || !matching_page_sizes) {
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /* Avoids the qemu_file copy during postcopy, which is
                 * going to do a copy later; can only do it when we
                 * do this read in one go (matching page sizes)
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
        }

        if (place_needed) {
            /* This gets called at the last target page in the host page */
            if (all_zero) {
                ret = postcopy_place_page_zero(mis,
                                               host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size);
            } else {
                ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size,
                                          place_source);
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}
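
/*
 * Illustration of the assembly logic above (a sketch with assumed
 * sizes): with 64KiB host pages and 4KiB target pages, the 16 target
 * pages of one host page arrive in order, each is copied into the
 * matching offset within postcopy_host_page, and only on the 16th
 * does place_needed become true, at which point the whole host page
 * is placed atomically (or mapped as zero if every piece was zero).
 */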

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If system is running in postcopy mode, page inserts to host memory must
     * be atomic
     */
    bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /* This RCU critical section can be very long running.
     * When RCU reclaims in the code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            host = host_from_stream_offset(f, addr, flags);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block->offset, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    rcu_read_unlock();
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
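
/*
 * The version_id check in ram_load() pairs with the '4' passed to
 * register_savevm_live() below: this code only speaks version 4 of
 * the RAM migration stream.
 */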

static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cleanup = ram_migration_cleanup,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}