/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */
#include "qemu/osdep.h"
#include "qemu-common.h"
#include "cpu.h"
#include <zlib.h>
#include "qapi-event.h"
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/timer.h"
#include "qemu/main-loop.h"
#include "migration/migration.h"
#include "migration/postcopy-ram.h"
#include "exec/address-spaces.h"
#include "migration/page_cache.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"

/***********************************************************/
/* ram save/restore */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_COMPRESS 0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h; start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

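/*
 * Illustrative sketch (not part of the stream code below): the flags
 * above travel in the low bits of the offset word, which is always
 * TARGET_PAGE_SIZE aligned, so sender and receiver can split them
 * apart again:
 *
 *     uint64_t word   = offset | RAM_SAVE_FLAG_PAGE;   // sender side
 *     uint64_t flags  = word & ~TARGET_PAGE_MASK;      // receiver side
 *     ram_addr_t addr = word & TARGET_PAGE_MASK;
 */
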
static uint8_t *ZERO_TARGET_PAGE;

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

/* This struct contains the XBZRLE cache and the static buffers
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_lock(&XBZRLE.lock);
    }
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle()) {
        qemu_mutex_unlock(&XBZRLE.lock);
    }
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress. A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by the XBZRLE.lock mutex.
 *
 * Returns the new_size or negative in case of error.
 *
 * @new_size: new cache size
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}

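/*
 * Usage sketch (a hypothetical caller, modelled on
 * qmp_migrate_set_cache_size; error handling abbreviated):
 *
 *     int64_t ret = xbzrle_cache_resize(512 * 1024 * 1024);
 *     if (ret < 0) {
 *         // resize failed; the old cache (if any) is still in place
 *     } else {
 *         // ret is the effective size, rounded down to a power of two
 *     }
 */
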
struct RAMBitmap {
    struct rcu_head rcu;
    /* Main migration bitmap */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once;
     * only maintained and used in postcopy at the moment,
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
};
typedef struct RAMBitmap RAMBitmap;

/* State of RAM for migration */
struct RAMState {
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last offset we have sent data from */
    ram_addr_t last_offset;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* How many times we have found a period with too many dirty pages */
    int dirty_rate_high_cnt;
    /* How many times we have synchronized the bitmap */
    uint64_t bitmap_sync_count;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;
    /* number of iterations at the beginning of period */
    uint64_t iterations_prev;
    /* Accounting fields */
    /* number of zero pages; historically this counted pages filled
     * with the same char */
    uint64_t zero_pages;
    /* number of normal transferred pages */
    uint64_t norm_pages;
    /* Iterations since start */
    uint64_t iterations;
    /* xbzrle transmitted bytes. Notice that this is with
     * compression, so it can't be calculated from the page count */
    uint64_t xbzrle_bytes;
    /* xbzrle transmitted pages */
    uint64_t xbzrle_pages;
    /* xbzrle number of cache misses */
    uint64_t xbzrle_cache_miss;
    /* xbzrle miss rate */
    double xbzrle_cache_miss_rate;
    /* xbzrle number of overflows */
    uint64_t xbzrle_overflows;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* total number of bytes transferred */
    uint64_t bytes_transferred;
    /* protects modification of the bitmap */
    QemuMutex bitmap_mutex;
    /* Ram Bitmap protected by RCU */
    RAMBitmap *ram_bitmap;
};
typedef struct RAMState RAMState;

static RAMState ram_state;

uint64_t dup_mig_pages_transferred(void)
{
    return ram_state.zero_pages;
}

uint64_t norm_mig_pages_transferred(void)
{
    return ram_state.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return ram_state.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return ram_state.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return ram_state.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return ram_state.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return ram_state.xbzrle_overflows;
}

uint64_t ram_bytes_transferred(void)
{
    return ram_state.bytes_transferred;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state.migration_dirty_pages * TARGET_PAGE_SIZE;
}

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current offset to search from */
    ram_addr_t   offset;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

struct CompressParam {
    bool done;
    bool quit;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock works together with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static bool compression_switch;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset);

static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            do_compress_ram_page(param->file, block, offset);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static inline void terminate_compression_threads(void)
{
    int idx, thread_count;

    thread_count = migrate_compress_threads();

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        comp_param[idx].quit = true;
        qemu_cond_signal(&comp_param[idx].cond);
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    compression_switch = true;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}

/**
 * save_page_header: write page header to wire
 *
 * If this is the first page sent for this block (the
 * RAM_SAVE_FLAG_CONTINUE flag is clear), it also writes the block
 * identification
 *
 * Returns the number of bytes written
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 *          in the lower bits, it contains flags
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;

    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
    }
    return size;
}

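/*
 * For reference, the resulting wire layout of a page header (a sketch
 * of what save_page_header() emits, not extra protocol):
 *
 *     8 bytes  be64  offset | flags
 *     1 byte         strlen(block->idstr)  \ only when RAM_SAVE_FLAG_CONTINUE
 *     n bytes        block->idstr          / is clear
 */
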
/**
 * mig_throttle_guest_down: throttle down the guest
 *
 * Reduce the amount of guest cpu execution to hopefully slow down memory
 * writes. If the guest dirty memory rate is reduced below the rate at
 * which we can transfer pages to the destination then we should be
 * able to complete migration. Some workloads dirty memory way too
 * fast and will not effectively converge, even with auto-converge.
 */
static void mig_throttle_guest_down(void)
{
    MigrationState *s = migrate_get_current();
    uint64_t pct_initial = s->parameters.cpu_throttle_initial;
    uint64_t pct_increment = s->parameters.cpu_throttle_increment;

    /* We have not started throttling yet. Let's start it. */
    if (!cpu_throttle_active()) {
        cpu_throttle_set(pct_initial);
    } else {
        /* Throttling already on, just increase the rate */
        cpu_throttle_set(cpu_throttle_get_percentage() + pct_increment);
    }
}

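/*
 * Example progression, assuming the usual defaults of
 * cpu_throttle_initial=20 and cpu_throttle_increment=10: successive
 * calls throttle the guest to 20%, then 30%, 40%, ... of its CPU
 * time, until the dirty rate drops below the transfer rate or the
 * throttle saturates.
 */
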
/**
 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
 *
 * @rs: current RAM state
 * @current_addr: address for the zero page
 *
 * Update the xbzrle cache to reflect a page that's been sent as all 0.
 * The important thing is that a stale (not-yet-0'd) page be replaced
 * by the new data.
 * As a bonus, if the page wasn't in the cache it gets added so that
 * when a small write is made into the 0'd page it gets XBZRLE sent.
 */
static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
{
    if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updates an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 rs->bitmap_sync_count);
}

#define ENCODING_FLAG_XBZRLE 0x1

/**
 * save_xbzrle_page: compress and send current page
 *
 * Returns: 1 means that we wrote the page
 *          0 means that page is identical to the one already sent
 *          -1 means that xbzrle would be longer than normal
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @current_data: pointer to the address of the page contents
 * @current_addr: addr of the page
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @last_stage: if we are at the completion stage
 */
static int save_xbzrle_page(RAMState *rs, QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, rs->bitmap_sync_count)) {
        rs->xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             rs->bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        trace_save_xbzrle_page_skipping();
        return 0;
    } else if (encoded_len == -1) {
        trace_save_xbzrle_page_overflow();
        rs->xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, so that the next pass
     * encodes against what was actually sent */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    bytes_xbzrle += encoded_len + 1 + 2;
    rs->xbzrle_pages++;
    rs->xbzrle_bytes += bytes_xbzrle;
    rs->bytes_transferred += bytes_xbzrle;

    return 1;
}

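/*
 * Sketch of how a caller consumes the result (cf. ram_save_page()
 * below; simplified):
 *
 *     pages = save_xbzrle_page(rs, f, &p, addr, block, offset, last_stage);
 *     if (pages == -1) {
 *         // cache miss or overflow: fall back to sending the whole page
 *     } else if (pages == 0) {
 *         // identical to the cached copy: nothing was put on the wire
 *     }
 */
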
/**
 * migration_bitmap_find_dirty: find the next dirty page from start
 *
 * Called with rcu_read_lock() to protect migration_bitmap
 *
 * Returns the byte offset within memory region of the start of a dirty page
 *
 * @rs: current RAM state
 * @rb: RAMBlock where to search for dirty pages
 * @start: starting address (typically so we can continue from previous page)
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static inline
ram_addr_t migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
                                       ram_addr_t start,
                                       ram_addr_t *ram_addr_abs)
{
    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t rb_size = rb->used_length;
    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
    unsigned long *bitmap;

    unsigned long next;

    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    if (rs->ram_bulk_stage && nr > base) {
        next = nr + 1;
    } else {
        next = find_next_bit(bitmap, size, nr);
    }

    *ram_addr_abs = next << TARGET_PAGE_BITS;
    return (next - base) << TARGET_PAGE_BITS;
}

static inline bool migration_bitmap_clear_dirty(RAMState *rs, ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;
    unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;

    ret = test_and_clear_bit(nr, bitmap);

    if (ret) {
        rs->migration_dirty_pages--;
    }
    return ret;
}

static void migration_bitmap_sync_range(RAMState *rs, ram_addr_t start,
                                        ram_addr_t length)
{
    unsigned long *bitmap;
    bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
    rs->migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length,
                                              &rs->num_dirty_pages_period);
}

/**
 * ram_pagesize_summary: calculate all the pagesizes of a VM
 *
 * Returns a summary bitmap of the page sizes of all RAMBlocks
 *
 * For VMs with just normal pages this is equivalent to the host page
 * size. If it's got some huge pages then it's the OR of all the
 * different page sizes.
 */
uint64_t ram_pagesize_summary(void)
{
    RAMBlock *block;
    uint64_t summary = 0;

    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        summary |= block->page_size;
    }

    return summary;
}

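/*
 * Example: a guest whose RAM is mostly 4 KiB pages plus one
 * 2 MiB hugepage-backed block yields 0x1000 | 0x200000 = 0x201000,
 * telling the destination that both page sizes are in use.
 */
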
static void migration_bitmap_sync(RAMState *rs)
{
    RAMBlock *block;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    uint64_t bytes_xfer_now;

    rs->bitmap_sync_count++;

    if (!rs->bytes_xfer_prev) {
        rs->bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!rs->time_last_bitmap_sync) {
        rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&rs->bitmap_mutex);
    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(rs, block->offset, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&rs->bitmap_mutex);

    trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);

    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 milliseconds */
    if (end_time > rs->time_last_bitmap_sync + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes are 50% more than the approx.
               amount of bytes that just got transferred since the last time
               we were in this routine. If that happens twice, start or
               increase throttling */
            bytes_xfer_now = ram_bytes_transferred();

            if (s->dirty_pages_rate &&
               (rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
               (rs->dirty_rate_high_cnt++ >= 2)) {
                trace_migration_throttle();
                rs->dirty_rate_high_cnt = 0;
                mig_throttle_guest_down();
            }
            rs->bytes_xfer_prev = bytes_xfer_now;
        }

        if (migrate_use_xbzrle()) {
            if (rs->iterations_prev != rs->iterations) {
                rs->xbzrle_cache_miss_rate =
                   (double)(rs->xbzrle_cache_miss -
                            rs->xbzrle_cache_miss_prev) /
                   (rs->iterations - rs->iterations_prev);
            }
            rs->iterations_prev = rs->iterations;
            rs->xbzrle_cache_miss_prev = rs->xbzrle_cache_miss;
        }
        s->dirty_pages_rate = rs->num_dirty_pages_period * 1000
            / (end_time - rs->time_last_bitmap_sync);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        rs->time_last_bitmap_sync = end_time;
        rs->num_dirty_pages_period = 0;
    }
    s->dirty_sync_count = rs->bitmap_sync_count;
    if (migrate_use_events()) {
        qapi_event_send_migration_pass(rs->bitmap_sync_count, NULL);
    }
}

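/*
 * The dirty page rate computed above is simply
 *
 *     dirty_pages_rate = num_dirty_pages_period * 1000
 *                        / (end_time - time_last_bitmap_sync)
 *
 * i.e. pages per second. For example, 50000 pages dirtied over a
 * 2000 ms period is 25000 pages/s, roughly 100 MB/s of 4 KiB pages
 * that will have to be resent.
 */
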
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 */
static int save_zero_page(RAMState *rs, QEMUFile *f, RAMBlock *block,
                          ram_addr_t offset, uint8_t *p)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        rs->zero_pages++;
        rs->bytes_transferred +=
            save_page_header(f, block, offset | RAM_SAVE_FLAG_COMPRESS);
        qemu_put_byte(f, 0);
        rs->bytes_transferred += 1;
        pages = 1;
    }

    return pages;
}

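/*
 * On the wire a zero page is just the page header with
 * RAM_SAVE_FLAG_COMPRESS set, followed by a single fill byte (0), so
 * each zero page costs about 9 bytes instead of TARGET_PAGE_SIZE.
 */
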
static void ram_release_pages(MigrationState *ms, const char *rbname,
                              uint64_t offset, int pages)
{
    if (!migrate_release_ram() || !migration_in_postcopy(ms)) {
        return;
    }

    ram_discard_range(NULL, rbname, offset, pages << TARGET_PAGE_BITS);
}

/**
 * ram_save_page: send the given page to the stream
 *
 * Returns the number of pages written.
 *          < 0 - error
 *          >=0 - Number of pages written - this might legally be 0
 *                if xbzrle noticed the page was the same.
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @f: QEMUFile where to send the data
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
                         PageSearchStatus *pss, bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    /* When in doubt, send the page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        rs->bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (block == rs->last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                rs->norm_pages++;
            } else if (bytes_xmit == 0) {
                rs->zero_pages++;
            }
        }
    } else {
        pages = save_zero_page(rs, f, block, offset, p);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(rs, current_addr);
            ram_release_pages(ms, block->idstr, pss->offset, pages);
        } else if (!rs->ram_bulk_stage &&
                   !migration_in_postcopy(ms) && migrate_use_xbzrle()) {
            pages = save_xbzrle_page(rs, f, &p, current_addr, block,
                                     offset, last_stage);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        rs->bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE,
                                  migrate_release_ram() &
                                  migration_in_postcopy(ms));
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        rs->bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        rs->norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset)
{
    int bytes_sent, blen;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(f, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    if (blen < 0) {
        bytes_sent = 0;
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
        ram_release_pages(migrate_get_current(), block->idstr,
                          offset & TARGET_PAGE_MASK, 1);
    }

    return bytes_sent;
}

static void flush_compressed_data(RAMState *rs, QEMUFile *f)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();

    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(f, comp_param[idx].file);
            rs->bytes_transferred += len;
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}

static int compress_page_with_multi_thread(RAMState *rs, QEMUFile *f,
                                           RAMBlock *block, ram_addr_t offset)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                comp_param[idx].done = false;
                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
                qemu_mutex_lock(&comp_param[idx].mutex);
                set_compress_params(&comp_param[idx], block, offset);
                qemu_cond_signal(&comp_param[idx].cond);
                qemu_mutex_unlock(&comp_param[idx].mutex);
                pages = 1;
                rs->norm_pages++;
                rs->bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}

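/*
 * Hand-off sketch: the migration thread waits under comp_done_lock for
 * any worker with ->done set, drains that worker's buffered QEMUFile
 * into the real stream, then publishes new work by setting
 * ->block/->offset and signalling ->cond. do_data_compress() above is
 * the worker-side mirror of this sequence.
 */
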
/**
 * ram_save_compressed_page: compress the given page and send it to the stream
 *
 * Returns the number of pages written.
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @f: QEMUFile where to send the data
 * @pss: data about the page we want to send
 * @last_stage: if we are at the completion stage
 */
static int ram_save_compressed_page(RAMState *rs, MigrationState *ms,
                                    QEMUFile *f,
                                    PageSearchStatus *pss, bool last_stage)
{
    int pages = -1;
    uint64_t bytes_xmit = 0;
    uint8_t *p;
    int ret, blen;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        rs->bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                rs->norm_pages++;
            } else if (bytes_xmit == 0) {
                rs->zero_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in last block should have been sent
         * out, keeping this order is important, because the 'cont' flag
         * is used to avoid resending the block name.
         */
        if (block != rs->last_sent_block) {
            flush_compressed_data(rs, f);
            pages = save_zero_page(rs, f, block, offset, p);
            if (pages == -1) {
                /* Make sure the first page is sent out before other pages */
                bytes_xmit = save_page_header(f, block, offset |
                                              RAM_SAVE_FLAG_COMPRESS_PAGE);
                blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                                 migrate_compress_level());
                if (blen > 0) {
                    rs->bytes_transferred += bytes_xmit + blen;
                    rs->norm_pages++;
                    pages = 1;
                } else {
                    qemu_file_set_error(f, blen);
                    error_report("compressed data failed!");
                }
            }
            if (pages > 0) {
                ram_release_pages(ms, block->idstr, pss->offset, pages);
            }
        } else {
            offset |= RAM_SAVE_FLAG_CONTINUE;
            pages = save_zero_page(rs, f, block, offset, p);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(rs, f, block, offset);
            } else {
                ram_release_pages(ms, block->idstr, pss->offset, pages);
            }
        }
    }

    return pages;
}

/**
 * find_dirty_block: find the next dirty page and update any state
 * associated with the search process.
 *
 * Returns whether a page is found
 *
 * @rs: current RAM state
 * @f: QEMUFile where to send the data
 * @pss: data about the state of the current dirty page scan
 * @again: set to false if the search has scanned the whole of RAM
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static bool find_dirty_block(RAMState *rs, QEMUFile *f, PageSearchStatus *pss,
                             bool *again, ram_addr_t *ram_addr_abs)
{
    pss->offset = migration_bitmap_find_dirty(rs, pss->block, pss->offset,
                                              ram_addr_abs);
    if (pss->complete_round && pss->block == rs->last_seen_block &&
        pss->offset >= rs->last_offset) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if (pss->offset >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->offset = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            rs->ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(rs, f);
                compression_switch = false;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}

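/*
 * Typical driver loop (a simplified sketch of how the search helpers
 * are combined by the save path later in this file):
 *
 *     pss.block = rs->last_seen_block;
 *     pss.offset = rs->last_offset;
 *     pss.complete_round = false;
 *     do {
 *         again = true;
 *         found = get_queued_page(rs, ms, &pss, &dirty_ram_abs);
 *         if (!found) {
 *             // priority queue empty: scan for the next dirty page
 *             found = find_dirty_block(rs, f, &pss, &again, &dirty_ram_abs);
 *         }
 *         if (found) {
 *             // send the page(s) at pss.block/pss.offset
 *         }
 *     } while (!found && again);
 */
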
/**
 * unqueue_page: gets a page of the queue
 *
 * Helper for 'get_queued_page' - gets a page off the queue
 *
 * Returns the block of the page (or NULL if none available)
 *
 * @ms: current migration state
 * @offset: used to return the offset within the RAMBlock
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
                              ram_addr_t *ram_addr_abs)
{
    RAMBlock *block = NULL;

    qemu_mutex_lock(&ms->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
        struct MigrationSrcPageRequest *entry =
                                QSIMPLEQ_FIRST(&ms->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;
        *ram_addr_abs = (entry->offset + entry->rb->offset) &
                        TARGET_PAGE_MASK;

        if (entry->len > TARGET_PAGE_SIZE) {
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
            g_free(entry);
        }
    }
    qemu_mutex_unlock(&ms->src_page_req_mutex);

    return block;
}

/**
 * get_queued_page: unqueue a page from the postcopy requests
 *
 * Skips pages that are already sent (!dirty)
 *
 * Returns whether a queued page is found
 *
 * @rs: current RAM state
 * @ms: current migration state
 * @pss: data about the state of the current dirty page scan
 * @ram_addr_abs: pointer into which to store the address of the dirty page
 *                within the global ram_addr space
 */
static bool get_queued_page(RAMState *rs, MigrationState *ms,
                            PageSearchStatus *pss,
                            ram_addr_t *ram_addr_abs)
{
    RAMBlock *block;
    ram_addr_t offset;
    bool dirty;

    do {
        block = unqueue_page(ms, &offset, ram_addr_abs);
        /*
         * We're sending this page, and since it's postcopy nothing else
         * will dirty it, and we must make sure it doesn't get sent again
         * even if this queue request was received after the background
         * search already sent it.
         */
        if (block) {
            unsigned long *bitmap;
            bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
            dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
            if (!dirty) {
                trace_get_queued_page_not_dirty(
                    block->idstr, (uint64_t)offset,
                    (uint64_t)*ram_addr_abs,
                    test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
                         atomic_rcu_read(&rs->ram_bitmap)->unsentmap));
            } else {
                trace_get_queued_page(block->idstr,
                                      (uint64_t)offset,
                                      (uint64_t)*ram_addr_abs);
            }
        }

    } while (block && !dirty);

    if (block) {
        /*
         * As soon as we start servicing pages out of order, then we have
         * to kill the bulk stage, since the bulk stage assumes
         * in (migration_bitmap_find_and_reset_dirty) that every page is
         * dirty, that's no longer true.
         */
        rs->ram_bulk_stage = false;

        /*
         * We want the background search to continue from the queued page
         * since the guest is likely to want other pages near to the page
         * it just requested.
         */
        pss->block = block;
        pss->offset = offset;
    }

    return !!block;
}

Juan Quintela56e93d22015-05-07 19:33:31 +02001188/**
Juan Quintela5e58f962017-04-03 22:06:54 +02001189 * migration_page_queue_free: drop any remaining pages in the ram
1190 * request queue
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001191 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001192 * It should be empty at the end anyway, but in error cases there may
1193 * be some left. in case that there is any page left, we drop it.
1194 *
1195 * @ms: current migration state
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001196 */
Juan Quintela5e58f962017-04-03 22:06:54 +02001197void migration_page_queue_free(MigrationState *ms)
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001198{
1199 struct MigrationSrcPageRequest *mspr, *next_mspr;
1200 /* This queue generally should be empty - but in the case of a failed
 1201 * migration it might have some entries left over.
1202 */
1203 rcu_read_lock();
1204 QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
1205 memory_region_unref(mspr->rb->mr);
1206 QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
1207 g_free(mspr);
1208 }
1209 rcu_read_unlock();
1210}
1211
1212/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001213 * ram_save_queue_pages: queue the page for transmission
1214 *
1215 * A request from postcopy destination for example.
1216 *
1217 * Returns zero on success or negative on error
1218 *
1219 * @ms: current migration state
 1220 * @rbname: Name of the RAMBlock of the request. NULL means the
 1221 * same as the last one.
1222 * @start: starting address from the start of the RAMBlock
1223 * @len: length (in bytes) to send
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001224 */
1225int ram_save_queue_pages(MigrationState *ms, const char *rbname,
1226 ram_addr_t start, ram_addr_t len)
1227{
1228 RAMBlock *ramblock;
1229
Dr. David Alan Gilbertd3bf5412016-06-13 12:16:42 +01001230 ms->postcopy_requests++;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001231 rcu_read_lock();
1232 if (!rbname) {
1233 /* Reuse last RAMBlock */
1234 ramblock = ms->last_req_rb;
1235
1236 if (!ramblock) {
1237 /*
1238 * Shouldn't happen, we can't reuse the last RAMBlock if
1239 * it's the 1st request.
1240 */
1241 error_report("ram_save_queue_pages no previous block");
1242 goto err;
1243 }
1244 } else {
1245 ramblock = qemu_ram_block_by_name(rbname);
1246
1247 if (!ramblock) {
1248 /* We shouldn't be asked for a non-existent RAMBlock */
1249 error_report("ram_save_queue_pages no block '%s'", rbname);
1250 goto err;
1251 }
1252 ms->last_req_rb = ramblock;
1253 }
1254 trace_ram_save_queue_pages(ramblock->idstr, start, len);
 1255 if (start + len > ramblock->used_length) {
Juan Quintela9458ad62015-11-10 17:42:05 +01001256 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
1257 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001258 __func__, start, len, ramblock->used_length);
1259 goto err;
1260 }
1261
1262 struct MigrationSrcPageRequest *new_entry =
1263 g_malloc0(sizeof(struct MigrationSrcPageRequest));
1264 new_entry->rb = ramblock;
1265 new_entry->offset = start;
1266 new_entry->len = len;
1267
1268 memory_region_ref(ramblock->mr);
1269 qemu_mutex_lock(&ms->src_page_req_mutex);
1270 QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
1271 qemu_mutex_unlock(&ms->src_page_req_mutex);
1272 rcu_read_unlock();
1273
1274 return 0;
1275
1276err:
1277 rcu_read_unlock();
1278 return -1;
1279}
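/*
 * Usage sketch (illustrative only; 'fault_addr' is a hypothetical
 * page-aligned address, not a variable in this file). A postcopy
 * destination faulting on a page in "pc.ram" makes the source do the
 * equivalent of:
 *
 *   if (ram_save_queue_pages(ms, "pc.ram", fault_addr, TARGET_PAGE_SIZE)) {
 *       error_report("failed to queue urgent page");
 *   }
 *
 * A follow-up request in the same block may pass rbname == NULL to
 * reuse ms->last_req_rb.
 */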
1280
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001281/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001282 * ram_save_target_page: save one target page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001283 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001284 * Returns the number of pages written
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001285 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001286 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001287 * @ms: current migration state
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001288 * @f: QEMUFile where to send the data
Juan Quintela3d0684b2017-03-23 15:06:39 +01001289 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001290 * @last_stage: if we are at the completion stage
Juan Quintela3d0684b2017-03-23 15:06:39 +01001291 * @dirty_ram_abs: address of the start of the dirty page in ram_addr_t space
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001292 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001293static int ram_save_target_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
zhanghailianga08f6892016-01-15 11:37:44 +08001294 PageSearchStatus *pss,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001295 bool last_stage,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001296 ram_addr_t dirty_ram_abs)
1297{
1298 int res = 0;
1299
 1300 /* Check if the page is dirty and if it is send it */
Juan Quintela0d8ec882017-03-13 21:21:41 +01001301 if (migration_bitmap_clear_dirty(rs, dirty_ram_abs)) {
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001302 unsigned long *unsentmap;
1303 if (compression_switch && migrate_use_compression()) {
Juan Quintela072c2512017-03-14 10:27:31 +01001304 res = ram_save_compressed_page(rs, ms, f, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001305 } else {
Juan Quintela072c2512017-03-14 10:27:31 +01001306 res = ram_save_page(rs, ms, f, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001307 }
1308
1309 if (res < 0) {
1310 return res;
1311 }
Juan Quintelaeb859c52017-03-13 21:51:55 +01001312 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001313 if (unsentmap) {
1314 clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
1315 }
Dr. David Alan Gilbert3fd3c4b2015-12-10 16:31:46 +00001316 /* Only update last_sent_block if a block was actually sent; xbzrle
1317 * might have decided the page was identical so didn't bother writing
1318 * to the stream.
1319 */
1320 if (res > 0) {
Juan Quintela6f37bb82017-03-13 19:26:29 +01001321 rs->last_sent_block = pss->block;
Dr. David Alan Gilbert3fd3c4b2015-12-10 16:31:46 +00001322 }
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001323 }
1324
1325 return res;
1326}
1327
1328/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001329 * ram_save_host_page: save a whole host page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001330 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001331 * Starting at *offset send pages up to the end of the current host
1332 * page. It's valid for the initial offset to point into the middle of
 1333 * a host page, in which case the remainder of the host page is sent.
1334 * Only dirty target pages are sent. Note that the host page size may
1335 * be a huge page for this block.
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001336 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001337 * Returns the number of pages written or negative on error
1338 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001339 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001340 * @ms: current migration state
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001341 * @f: QEMUFile where to send the data
Juan Quintela3d0684b2017-03-23 15:06:39 +01001342 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001343 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001344 * @dirty_ram_abs: Address of the start of the dirty page in ram_addr_t space
1345 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001346static int ram_save_host_page(RAMState *rs, MigrationState *ms, QEMUFile *f,
zhanghailianga08f6892016-01-15 11:37:44 +08001347 PageSearchStatus *pss,
1348 bool last_stage,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001349 ram_addr_t dirty_ram_abs)
1350{
1351 int tmppages, pages = 0;
Dr. David Alan Gilbert4c011c32017-02-24 18:28:39 +00001352 size_t pagesize = qemu_ram_pagesize(pss->block);
1353
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001354 do {
Juan Quintela6f37bb82017-03-13 19:26:29 +01001355 tmppages = ram_save_target_page(rs, ms, f, pss, last_stage,
Juan Quintela072c2512017-03-14 10:27:31 +01001356 dirty_ram_abs);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001357 if (tmppages < 0) {
1358 return tmppages;
1359 }
1360
1361 pages += tmppages;
zhanghailianga08f6892016-01-15 11:37:44 +08001362 pss->offset += TARGET_PAGE_SIZE;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001363 dirty_ram_abs += TARGET_PAGE_SIZE;
Dr. David Alan Gilbert4c011c32017-02-24 18:28:39 +00001364 } while (pss->offset & (pagesize - 1));
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001365
1366 /* The offset we leave with is the last one we looked at */
zhanghailianga08f6892016-01-15 11:37:44 +08001367 pss->offset -= TARGET_PAGE_SIZE;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001368 return pages;
1369}
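/*
 * Illustrative numbers (assumed sizes, not derived from this build):
 * for a RAMBlock backed by 2MiB hugetlbfs pages with a 4KiB target
 * page size, qemu_ram_pagesize() returns 2MiB and one call to
 * ram_save_host_page() covers up to 512 target pages, looping until
 * pss->offset reaches a 2MiB boundary.
 */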
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00001370
1371/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001372 * ram_find_and_save_block: finds a dirty page and sends it to f
Juan Quintela56e93d22015-05-07 19:33:31 +02001373 *
1374 * Called within an RCU critical section.
1375 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001376 * Returns the number of pages written where zero means no dirty pages
Juan Quintela56e93d22015-05-07 19:33:31 +02001377 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001378 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02001379 * @f: QEMUFile where to send the data
1380 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001381 *
1382 * On systems where host-page-size > target-page-size it will send all the
1383 * pages in a host page that are dirty.
Juan Quintela56e93d22015-05-07 19:33:31 +02001384 */
1385
Juan Quintela072c2512017-03-14 10:27:31 +01001386static int ram_find_and_save_block(RAMState *rs, QEMUFile *f, bool last_stage)
Juan Quintela56e93d22015-05-07 19:33:31 +02001387{
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001388 PageSearchStatus pss;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001389 MigrationState *ms = migrate_get_current();
Juan Quintela56e93d22015-05-07 19:33:31 +02001390 int pages = 0;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001391 bool again, found;
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001392 ram_addr_t dirty_ram_abs; /* Address of the start of the dirty page in
1393 ram_addr_t space */
Juan Quintela56e93d22015-05-07 19:33:31 +02001394
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05301395 /* No dirty page as there is zero RAM */
1396 if (!ram_bytes_total()) {
1397 return pages;
1398 }
1399
Juan Quintela6f37bb82017-03-13 19:26:29 +01001400 pss.block = rs->last_seen_block;
1401 pss.offset = rs->last_offset;
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01001402 pss.complete_round = false;
1403
1404 if (!pss.block) {
1405 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
1406 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001407
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001408 do {
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001409 again = true;
Juan Quintela6f37bb82017-03-13 19:26:29 +01001410 found = get_queued_page(rs, ms, &pss, &dirty_ram_abs);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001411
1412 if (!found) {
1413 /* priority queue empty, so just search for something dirty */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001414 found = find_dirty_block(rs, f, &pss, &again, &dirty_ram_abs);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001415 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001416
1417 if (found) {
Juan Quintela072c2512017-03-14 10:27:31 +01001418 pages = ram_save_host_page(rs, ms, f, &pss, last_stage,
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001419 dirty_ram_abs);
Juan Quintela56e93d22015-05-07 19:33:31 +02001420 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01001421 } while (!pages && again);
Juan Quintela56e93d22015-05-07 19:33:31 +02001422
Juan Quintela6f37bb82017-03-13 19:26:29 +01001423 rs->last_seen_block = pss.block;
1424 rs->last_offset = pss.offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02001425
1426 return pages;
1427}
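/*
 * Search order above, in brief: urgent (postcopy-requested) pages are
 * served first via get_queued_page(); only when that queue is empty
 * does the linear scan resume, continuing from wherever the previous
 * call stopped (state kept in rs->last_seen_block / rs->last_offset).
 */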
1428
1429void acct_update_position(QEMUFile *f, size_t size, bool zero)
1430{
1431 uint64_t pages = size / TARGET_PAGE_SIZE;
Juan Quintelaf7ccd612017-03-13 20:30:21 +01001432 RAMState *rs = &ram_state;
1433
Juan Quintela56e93d22015-05-07 19:33:31 +02001434 if (zero) {
Juan Quintelaf7ccd612017-03-13 20:30:21 +01001435 rs->zero_pages += pages;
Juan Quintela56e93d22015-05-07 19:33:31 +02001436 } else {
Juan Quintelab4d1c6e2017-03-13 20:40:53 +01001437 rs->norm_pages += pages;
Juan Quintela2f4fde92017-03-13 21:58:11 +01001438 rs->bytes_transferred += size;
Juan Quintela56e93d22015-05-07 19:33:31 +02001439 qemu_update_position(f, size);
1440 }
1441}
1442
Juan Quintela56e93d22015-05-07 19:33:31 +02001443uint64_t ram_bytes_total(void)
1444{
1445 RAMBlock *block;
1446 uint64_t total = 0;
1447
1448 rcu_read_lock();
1449 QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
1450 total += block->used_length;
1451 rcu_read_unlock();
1452 return total;
1453}
1454
1455void free_xbzrle_decoded_buf(void)
1456{
1457 g_free(xbzrle_decoded_buf);
1458 xbzrle_decoded_buf = NULL;
1459}
1460
Juan Quintelaeb859c52017-03-13 21:51:55 +01001461static void migration_bitmap_free(struct RAMBitmap *bmap)
Denis V. Lunev60be6342015-09-28 14:41:58 +03001462{
1463 g_free(bmap->bmap);
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001464 g_free(bmap->unsentmap);
Denis V. Lunev60be6342015-09-28 14:41:58 +03001465 g_free(bmap);
1466}
1467
Liang Li6ad2a212015-11-02 15:37:03 +08001468static void ram_migration_cleanup(void *opaque)
Juan Quintela56e93d22015-05-07 19:33:31 +02001469{
Juan Quintelaeb859c52017-03-13 21:51:55 +01001470 RAMState *rs = opaque;
1471
Li Zhijian2ff64032015-07-02 20:18:05 +08001472 /* caller must hold the iothread lock or be in a bh, so there is
1473 * no writing race against this migration_bitmap
1474 */
Juan Quintelaeb859c52017-03-13 21:51:55 +01001475 struct RAMBitmap *bitmap = rs->ram_bitmap;
1476 atomic_rcu_set(&rs->ram_bitmap, NULL);
Li Zhijian2ff64032015-07-02 20:18:05 +08001477 if (bitmap) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001478 memory_global_dirty_log_stop();
Denis V. Lunev60be6342015-09-28 14:41:58 +03001479 call_rcu(bitmap, migration_bitmap_free, rcu);
Juan Quintela56e93d22015-05-07 19:33:31 +02001480 }
1481
1482 XBZRLE_cache_lock();
1483 if (XBZRLE.cache) {
1484 cache_fini(XBZRLE.cache);
1485 g_free(XBZRLE.encoded_buf);
1486 g_free(XBZRLE.current_buf);
Vijaya Kumar Kadb65de2016-10-24 16:26:49 +01001487 g_free(ZERO_TARGET_PAGE);
Juan Quintela56e93d22015-05-07 19:33:31 +02001488 XBZRLE.cache = NULL;
1489 XBZRLE.encoded_buf = NULL;
1490 XBZRLE.current_buf = NULL;
1491 }
1492 XBZRLE_cache_unlock();
1493}
1494
Juan Quintela6f37bb82017-03-13 19:26:29 +01001495static void ram_state_reset(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02001496{
Juan Quintela6f37bb82017-03-13 19:26:29 +01001497 rs->last_seen_block = NULL;
1498 rs->last_sent_block = NULL;
1499 rs->last_offset = 0;
1500 rs->last_version = ram_list.version;
1501 rs->ram_bulk_stage = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02001502}
1503
1504#define MAX_WAIT 50 /* ms, half buffered_file limit */
1505
Li Zhijiandd631692015-07-02 20:18:06 +08001506void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
1507{
Juan Quintela0d8ec882017-03-13 21:21:41 +01001508 RAMState *rs = &ram_state;
Juan Quintela108cfae2017-03-13 21:38:09 +01001509
Li Zhijiandd631692015-07-02 20:18:06 +08001510 /* called in qemu main thread, so there is
1511 * no writing race against this migration_bitmap
1512 */
Juan Quintelaeb859c52017-03-13 21:51:55 +01001513 if (rs->ram_bitmap) {
1514 struct RAMBitmap *old_bitmap = rs->ram_bitmap, *bitmap;
1515 bitmap = g_new(struct RAMBitmap, 1);
Denis V. Lunev60be6342015-09-28 14:41:58 +03001516 bitmap->bmap = bitmap_new(new);
Li Zhijiandd631692015-07-02 20:18:06 +08001517
 1518 /* prevent bits in migration_bitmap from being set
 1519 * by migration_bitmap_sync_range() at the same time.
 1520 * it is safe for migration if a migration_bitmap bit is cleared
 1521 * at the same time.
1522 */
Juan Quintela108cfae2017-03-13 21:38:09 +01001523 qemu_mutex_lock(&rs->bitmap_mutex);
Denis V. Lunev60be6342015-09-28 14:41:58 +03001524 bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
1525 bitmap_set(bitmap->bmap, old, new - old);
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001526
 1527 /* We don't have a way to safely extend the unsentmap
1528 * with RCU; so mark it as missing, entry to postcopy
1529 * will fail.
1530 */
1531 bitmap->unsentmap = NULL;
1532
Juan Quintelaeb859c52017-03-13 21:51:55 +01001533 atomic_rcu_set(&rs->ram_bitmap, bitmap);
Juan Quintela108cfae2017-03-13 21:38:09 +01001534 qemu_mutex_unlock(&rs->bitmap_mutex);
Juan Quintela0d8ec882017-03-13 21:21:41 +01001535 rs->migration_dirty_pages += new - old;
Denis V. Lunev60be6342015-09-28 14:41:58 +03001536 call_rcu(old_bitmap, migration_bitmap_free, rcu);
Li Zhijiandd631692015-07-02 20:18:06 +08001537 }
1538}
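/*
 * The function above is the usual RCU publish pattern: build the new
 * bitmap, copy and extend it under bitmap_mutex, publish it with
 * atomic_rcu_set(), then let call_rcu() free the old copy once all
 * readers have finished.  A minimal sketch of the matching reader
 * side, as used throughout this file:
 *
 *   rcu_read_lock();
 *   bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
 *   ... test/set bits ...
 *   rcu_read_unlock();
 */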
Juan Quintela56e93d22015-05-07 19:33:31 +02001539
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001540/*
1541 * 'expected' is the value you expect the bitmap mostly to be full
1542 * of; it won't bother printing lines that are all this value.
1543 * If 'todump' is null the migration bitmap is dumped.
1544 */
1545void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
1546{
1547 int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
Juan Quintelaeb859c52017-03-13 21:51:55 +01001548 RAMState *rs = &ram_state;
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001549 int64_t cur;
1550 int64_t linelen = 128;
1551 char linebuf[129];
1552
1553 if (!todump) {
Juan Quintelaeb859c52017-03-13 21:51:55 +01001554 todump = atomic_rcu_read(&rs->ram_bitmap)->bmap;
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00001555 }
1556
1557 for (cur = 0; cur < ram_pages; cur += linelen) {
1558 int64_t curb;
1559 bool found = false;
1560 /*
1561 * Last line; catch the case where the line length
1562 * is longer than remaining ram
1563 */
1564 if (cur + linelen > ram_pages) {
1565 linelen = ram_pages - cur;
1566 }
1567 for (curb = 0; curb < linelen; curb++) {
1568 bool thisbit = test_bit(cur + curb, todump);
1569 linebuf[curb] = thisbit ? '1' : '.';
1570 found = found || (thisbit != expected);
1571 }
1572 if (found) {
1573 linebuf[curb] = '\0';
1574 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
1575 }
1576 }
1577}
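/*
 * Example output line (format only; the address and pattern are made
 * up):
 *
 *   0x00000080 : 1111....11111111..............11
 *
 * i.e. the page index at the start of the line in hex, then one
 * character per page: '1' for a set bit, '.' for a clear one.  Lines
 * consisting entirely of the 'expected' value are suppressed.
 */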
1578
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001579/* **** functions for postcopy ***** */
1580
Pavel Butsykinced1c612017-02-03 18:23:21 +03001581void ram_postcopy_migrated_memory_release(MigrationState *ms)
1582{
Juan Quintelaeb859c52017-03-13 21:51:55 +01001583 RAMState *rs = &ram_state;
Pavel Butsykinced1c612017-02-03 18:23:21 +03001584 struct RAMBlock *block;
Juan Quintelaeb859c52017-03-13 21:51:55 +01001585 unsigned long *bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
Pavel Butsykinced1c612017-02-03 18:23:21 +03001586
1587 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1588 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1589 unsigned long range = first + (block->used_length >> TARGET_PAGE_BITS);
1590 unsigned long run_start = find_next_zero_bit(bitmap, range, first);
1591
1592 while (run_start < range) {
1593 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
1594 ram_discard_range(NULL, block->idstr, run_start << TARGET_PAGE_BITS,
1595 (run_end - run_start) << TARGET_PAGE_BITS);
1596 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
1597 }
1598 }
1599}
1600
Juan Quintela3d0684b2017-03-23 15:06:39 +01001601/**
1602 * postcopy_send_discard_bm_ram: discard a RAMBlock
1603 *
1604 * Returns zero on success
1605 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001606 * Callback from postcopy_each_ram_send_discard for each RAMBlock
1607 * Note: At this point the 'unsentmap' is the processed bitmap combined
1608 * with the dirtymap; so a '1' means it's either dirty or unsent.
Juan Quintela3d0684b2017-03-23 15:06:39 +01001609 *
1610 * @ms: current migration state
1611 * @pds: state for postcopy
1612 * @start: RAMBlock starting page
1613 * @length: RAMBlock size
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001614 */
1615static int postcopy_send_discard_bm_ram(MigrationState *ms,
1616 PostcopyDiscardState *pds,
1617 unsigned long start,
1618 unsigned long length)
1619{
Juan Quintelaeb859c52017-03-13 21:51:55 +01001620 RAMState *rs = &ram_state;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001621 unsigned long end = start + length; /* one after the end */
1622 unsigned long current;
1623 unsigned long *unsentmap;
1624
Juan Quintelaeb859c52017-03-13 21:51:55 +01001625 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001626 for (current = start; current < end; ) {
1627 unsigned long one = find_next_bit(unsentmap, end, current);
1628
1629 if (one <= end) {
1630 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1631 unsigned long discard_length;
1632
1633 if (zero >= end) {
1634 discard_length = end - one;
1635 } else {
1636 discard_length = zero - one;
1637 }
Dr. David Alan Gilbertd688c622016-06-13 12:16:40 +01001638 if (discard_length) {
1639 postcopy_discard_send_range(ms, pds, one, discard_length);
1640 }
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001641 current = one + discard_length;
1642 } else {
1643 current = one;
1644 }
1645 }
1646
1647 return 0;
1648}
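/*
 * Worked example (made-up bitmap): if the unsentmap holds ..11.111
 * for pages 8..15 (pages 10,11 and 13,14,15 set), the loop above
 * emits two discards - postcopy_discard_send_range(ms, pds, 10, 2)
 * and postcopy_discard_send_range(ms, pds, 13, 3); each run of set
 * bits becomes exactly one range.
 */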
1649
Juan Quintela3d0684b2017-03-23 15:06:39 +01001650/**
1651 * postcopy_each_ram_send_discard: discard all RAMBlocks
1652 *
1653 * Returns 0 for success or negative for error
1654 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001655 * Utility for the outgoing postcopy code.
1656 * Calls postcopy_send_discard_bm_ram for each RAMBlock
1657 * passing it bitmap indexes and name.
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001658 * (qemu_ram_foreach_block ends up passing unscaled lengths
1659 * which would mean postcopy code would have to deal with target page)
Juan Quintela3d0684b2017-03-23 15:06:39 +01001660 *
1661 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001662 */
1663static int postcopy_each_ram_send_discard(MigrationState *ms)
1664{
1665 struct RAMBlock *block;
1666 int ret;
1667
1668 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1669 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1670 PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1671 first,
1672 block->idstr);
1673
1674 /*
1675 * Postcopy sends chunks of bitmap over the wire, but it
1676 * just needs indexes at this point, avoids it having
1677 * target page specific code.
1678 */
1679 ret = postcopy_send_discard_bm_ram(ms, pds, first,
1680 block->used_length >> TARGET_PAGE_BITS);
1681 postcopy_discard_send_finish(ms, pds);
1682 if (ret) {
1683 return ret;
1684 }
1685 }
1686
1687 return 0;
1688}
1689
Juan Quintela3d0684b2017-03-23 15:06:39 +01001690/**
 1691 * postcopy_chunk_hostpages_pass: canonicalize bitmap in host pages
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001692 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001693 * Helper for postcopy_chunk_hostpages; it's called twice to
1694 * canonicalize the two bitmaps, that are similar, but one is
1695 * inverted.
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001696 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001697 * Postcopy requires that all target pages in a host page are dirty or
1698 * clean, not a mix. This function canonicalizes the bitmaps.
1699 *
1700 * @ms: current migration state
1701 * @unsent_pass: if true we need to canonicalize partially unsent host pages
1702 * otherwise we need to canonicalize partially dirty host pages
1703 * @block: block that contains the page we want to canonicalize
1704 * @pds: state for postcopy
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001705 */
1706static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
1707 RAMBlock *block,
1708 PostcopyDiscardState *pds)
1709{
Juan Quintela0d8ec882017-03-13 21:21:41 +01001710 RAMState *rs = &ram_state;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001711 unsigned long *bitmap;
1712 unsigned long *unsentmap;
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001713 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001714 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1715 unsigned long len = block->used_length >> TARGET_PAGE_BITS;
1716 unsigned long last = first + (len - 1);
1717 unsigned long run_start;
1718
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001719 if (block->page_size == TARGET_PAGE_SIZE) {
1720 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
1721 return;
1722 }
1723
Juan Quintelaeb859c52017-03-13 21:51:55 +01001724 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
1725 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001726
1727 if (unsent_pass) {
1728 /* Find a sent page */
1729 run_start = find_next_zero_bit(unsentmap, last + 1, first);
1730 } else {
1731 /* Find a dirty page */
1732 run_start = find_next_bit(bitmap, last + 1, first);
1733 }
1734
1735 while (run_start <= last) {
1736 bool do_fixup = false;
1737 unsigned long fixup_start_addr;
1738 unsigned long host_offset;
1739
1740 /*
1741 * If the start of this run of pages is in the middle of a host
1742 * page, then we need to fixup this host page.
1743 */
1744 host_offset = run_start % host_ratio;
1745 if (host_offset) {
1746 do_fixup = true;
1747 run_start -= host_offset;
1748 fixup_start_addr = run_start;
1749 /* For the next pass */
1750 run_start = run_start + host_ratio;
1751 } else {
1752 /* Find the end of this run */
1753 unsigned long run_end;
1754 if (unsent_pass) {
1755 run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
1756 } else {
1757 run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
1758 }
1759 /*
1760 * If the end isn't at the start of a host page, then the
1761 * run doesn't finish at the end of a host page
1762 * and we need to discard.
1763 */
1764 host_offset = run_end % host_ratio;
1765 if (host_offset) {
1766 do_fixup = true;
1767 fixup_start_addr = run_end - host_offset;
1768 /*
1769 * This host page has gone, the next loop iteration starts
1770 * from after the fixup
1771 */
1772 run_start = fixup_start_addr + host_ratio;
1773 } else {
1774 /*
1775 * No discards on this iteration, next loop starts from
1776 * next sent/dirty page
1777 */
1778 run_start = run_end + 1;
1779 }
1780 }
1781
1782 if (do_fixup) {
1783 unsigned long page;
1784
1785 /* Tell the destination to discard this page */
1786 if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
1787 /* For the unsent_pass we:
1788 * discard partially sent pages
1789 * For the !unsent_pass (dirty) we:
1790 * discard partially dirty pages that were sent
1791 * (any partially sent pages were already discarded
1792 * by the previous unsent_pass)
1793 */
1794 postcopy_discard_send_range(ms, pds, fixup_start_addr,
1795 host_ratio);
1796 }
1797
1798 /* Clean up the bitmap */
1799 for (page = fixup_start_addr;
1800 page < fixup_start_addr + host_ratio; page++) {
1801 /* All pages in this host page are now not sent */
1802 set_bit(page, unsentmap);
1803
1804 /*
1805 * Remark them as dirty, updating the count for any pages
1806 * that weren't previously dirty.
1807 */
Juan Quintela0d8ec882017-03-13 21:21:41 +01001808 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001809 }
1810 }
1811
1812 if (unsent_pass) {
1813 /* Find the next sent page for the next iteration */
1814 run_start = find_next_zero_bit(unsentmap, last + 1,
1815 run_start);
1816 } else {
1817 /* Find the next dirty page for the next iteration */
1818 run_start = find_next_bit(bitmap, last + 1, run_start);
1819 }
1820 }
1821}
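/*
 * Worked example for the dirty (!unsent_pass) pass, with made-up
 * numbers: host_ratio = 4 and the first dirty target page is 6.
 * host_offset = 6 % 4 = 2, so the run is pulled back to page 4 and,
 * if that host page had (partially) been sent, pages 4-7 are
 * discarded on the destination and re-marked dirty/unsent here - the
 * whole host page is resent rather than only part of it.
 */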
1822
Juan Quintela3d0684b2017-03-23 15:06:39 +01001823/**
 1824 * postcopy_chunk_hostpages: discard any partially sent host page
1825 *
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001826 * Utility for the outgoing postcopy code.
1827 *
1828 * Discard any partially sent host-page size chunks, mark any partially
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001829 * dirty host-page size chunks as all dirty. In this case the host-page
1830 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001831 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001832 * Returns zero on success
1833 *
1834 * @ms: current migration state
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001835 */
1836static int postcopy_chunk_hostpages(MigrationState *ms)
1837{
Juan Quintela6f37bb82017-03-13 19:26:29 +01001838 RAMState *rs = &ram_state;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001839 struct RAMBlock *block;
1840
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001841 /* Easiest way to make sure we don't resume in the middle of a host-page */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001842 rs->last_seen_block = NULL;
1843 rs->last_sent_block = NULL;
1844 rs->last_offset = 0;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001845
1846 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1847 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1848
1849 PostcopyDiscardState *pds =
1850 postcopy_discard_send_init(ms, first, block->idstr);
1851
1852 /* First pass: Discard all partially sent host pages */
1853 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1854 /*
1855 * Second pass: Ensure that all partially dirty host pages are made
1856 * fully dirty.
1857 */
1858 postcopy_chunk_hostpages_pass(ms, false, block, pds);
1859
1860 postcopy_discard_send_finish(ms, pds);
1861 } /* ram_list loop */
1862
1863 return 0;
1864}
1865
Juan Quintela3d0684b2017-03-23 15:06:39 +01001866/**
1867 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
1868 *
1869 * Returns zero on success
1870 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001871 * Transmit the set of pages to be discarded after precopy to the target;
1872 * these are pages that:
1873 * a) Have been previously transmitted but are now dirty again
1874 * b) Pages that have never been transmitted, this ensures that
1875 * any pages on the destination that have been mapped by background
1876 * tasks get discarded (transparent huge pages is the specific concern)
1877 * Hopefully this is pretty sparse
Juan Quintela3d0684b2017-03-23 15:06:39 +01001878 *
1879 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001880 */
1881int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1882{
Juan Quintelaeb859c52017-03-13 21:51:55 +01001883 RAMState *rs = &ram_state;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001884 int ret;
1885 unsigned long *bitmap, *unsentmap;
1886
1887 rcu_read_lock();
1888
1889 /* This should be our last sync, the src is now paused */
Juan Quintelaeb859c52017-03-13 21:51:55 +01001890 migration_bitmap_sync(rs);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001891
Juan Quintelaeb859c52017-03-13 21:51:55 +01001892 unsentmap = atomic_rcu_read(&rs->ram_bitmap)->unsentmap;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001893 if (!unsentmap) {
 1894 /* We don't have a safe way to resize the unsentmap, so
1895 * if the bitmap was resized it will be NULL at this
1896 * point.
1897 */
1898 error_report("migration ram resized during precopy phase");
1899 rcu_read_unlock();
1900 return -EINVAL;
1901 }
1902
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00001903 /* Deal with TPS != HPS and huge pages */
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00001904 ret = postcopy_chunk_hostpages(ms);
1905 if (ret) {
1906 rcu_read_unlock();
1907 return ret;
1908 }
1909
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001910 /*
1911 * Update the unsentmap to be unsentmap = unsentmap | dirty
1912 */
Juan Quintelaeb859c52017-03-13 21:51:55 +01001913 bitmap = atomic_rcu_read(&rs->ram_bitmap)->bmap;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001914 bitmap_or(unsentmap, unsentmap, bitmap,
1915 last_ram_offset() >> TARGET_PAGE_BITS);
1916
1917
1918 trace_ram_postcopy_send_discard_bitmap();
1919#ifdef DEBUG_POSTCOPY
1920 ram_debug_dump_bitmap(unsentmap, true);
1921#endif
1922
1923 ret = postcopy_each_ram_send_discard(ms);
1924 rcu_read_unlock();
1925
1926 return ret;
1927}
1928
Juan Quintela3d0684b2017-03-23 15:06:39 +01001929/**
1930 * ram_discard_range: discard dirtied pages at the beginning of postcopy
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001931 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001932 * Returns zero on success
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001933 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001934 * @mis: current migration incoming state
Juan Quintela36449152017-03-23 15:11:59 +01001935 * @rbname: name of the RAMBlock of the request. NULL means the
1936 * same that last one.
Juan Quintela3d0684b2017-03-23 15:06:39 +01001937 * @start: RAMBlock starting page
1938 * @length: RAMBlock size
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001939 */
1940int ram_discard_range(MigrationIncomingState *mis,
Juan Quintela36449152017-03-23 15:11:59 +01001941 const char *rbname,
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001942 uint64_t start, size_t length)
1943{
1944 int ret = -1;
1945
Juan Quintela36449152017-03-23 15:11:59 +01001946 trace_ram_discard_range(rbname, start, length);
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00001947
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001948 rcu_read_lock();
Juan Quintela36449152017-03-23 15:11:59 +01001949 RAMBlock *rb = qemu_ram_block_by_name(rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001950
1951 if (!rb) {
Juan Quintela36449152017-03-23 15:11:59 +01001952 error_report("ram_discard_range: Failed to find block '%s'", rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001953 goto err;
1954 }
1955
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00001956 ret = ram_block_discard_range(rb, start, length);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00001957
1958err:
1959 rcu_read_unlock();
1960
1961 return ret;
1962}
1963
Juan Quintelaceb4d162017-03-13 21:29:54 +01001964static int ram_state_init(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02001965{
Juan Quintela56e93d22015-05-07 19:33:31 +02001966 int64_t ram_bitmap_pages; /* Size of bitmap in pages, including gaps */
1967
Juan Quintelaceb4d162017-03-13 21:29:54 +01001968 memset(rs, 0, sizeof(*rs));
Juan Quintela108cfae2017-03-13 21:38:09 +01001969 qemu_mutex_init(&rs->bitmap_mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02001970
1971 if (migrate_use_xbzrle()) {
1972 XBZRLE_cache_lock();
Vijaya Kumar Kadb65de2016-10-24 16:26:49 +01001973 ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
Juan Quintela56e93d22015-05-07 19:33:31 +02001974 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
1975 TARGET_PAGE_SIZE,
1976 TARGET_PAGE_SIZE);
1977 if (!XBZRLE.cache) {
1978 XBZRLE_cache_unlock();
1979 error_report("Error creating cache");
1980 return -1;
1981 }
1982 XBZRLE_cache_unlock();
1983
1984 /* We prefer not to abort if there is no memory */
1985 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
1986 if (!XBZRLE.encoded_buf) {
1987 error_report("Error allocating encoded_buf");
1988 return -1;
1989 }
1990
1991 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
1992 if (!XBZRLE.current_buf) {
1993 error_report("Error allocating current_buf");
1994 g_free(XBZRLE.encoded_buf);
1995 XBZRLE.encoded_buf = NULL;
1996 return -1;
1997 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001998 }
1999
Paolo Bonzini49877832016-02-15 19:57:57 +01002000 /* For memory_global_dirty_log_start below. */
2001 qemu_mutex_lock_iothread();
2002
Juan Quintela56e93d22015-05-07 19:33:31 +02002003 qemu_mutex_lock_ramlist();
2004 rcu_read_lock();
Juan Quintela6f37bb82017-03-13 19:26:29 +01002005 ram_state_reset(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002006
Juan Quintelaeb859c52017-03-13 21:51:55 +01002007 rs->ram_bitmap = g_new0(struct RAMBitmap, 1);
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05302008 /* Skip setting bitmap if there is no RAM */
2009 if (ram_bytes_total()) {
2010 ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
Juan Quintelaeb859c52017-03-13 21:51:55 +01002011 rs->ram_bitmap->bmap = bitmap_new(ram_bitmap_pages);
2012 bitmap_set(rs->ram_bitmap->bmap, 0, ram_bitmap_pages);
Juan Quintela56e93d22015-05-07 19:33:31 +02002013
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05302014 if (migrate_postcopy_ram()) {
Juan Quintelaeb859c52017-03-13 21:51:55 +01002015 rs->ram_bitmap->unsentmap = bitmap_new(ram_bitmap_pages);
2016 bitmap_set(rs->ram_bitmap->unsentmap, 0, ram_bitmap_pages);
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05302017 }
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00002018 }
2019
Juan Quintela56e93d22015-05-07 19:33:31 +02002020 /*
2021 * Count the total number of pages used by ram blocks not including any
2022 * gaps due to alignment or unplugs.
2023 */
Juan Quintela0d8ec882017-03-13 21:21:41 +01002024 rs->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
Juan Quintela56e93d22015-05-07 19:33:31 +02002025
2026 memory_global_dirty_log_start();
Juan Quintela8d820d62017-03-13 19:35:50 +01002027 migration_bitmap_sync(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002028 qemu_mutex_unlock_ramlist();
Paolo Bonzini49877832016-02-15 19:57:57 +01002029 qemu_mutex_unlock_iothread();
zhanghailianga91246c2016-10-27 14:42:59 +08002030 rcu_read_unlock();
2031
2032 return 0;
2033}
2034
Juan Quintela3d0684b2017-03-23 15:06:39 +01002035/*
2036 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
zhanghailianga91246c2016-10-27 14:42:59 +08002037 * a long-running RCU critical section. When rcu-reclaims in the code
2038 * start to become numerous it will be necessary to reduce the
2039 * granularity of these critical sections.
2040 */
2041
Juan Quintela3d0684b2017-03-23 15:06:39 +01002042/**
2043 * ram_save_setup: Setup RAM for migration
2044 *
2045 * Returns zero to indicate success and negative for error
2046 *
2047 * @f: QEMUFile where to send the data
2048 * @opaque: RAMState pointer
2049 */
zhanghailianga91246c2016-10-27 14:42:59 +08002050static int ram_save_setup(QEMUFile *f, void *opaque)
2051{
Juan Quintela6f37bb82017-03-13 19:26:29 +01002052 RAMState *rs = opaque;
zhanghailianga91246c2016-10-27 14:42:59 +08002053 RAMBlock *block;
2054
2055 /* migration has already setup the bitmap, reuse it. */
2056 if (!migration_in_colo_state()) {
Juan Quintelaceb4d162017-03-13 21:29:54 +01002057 if (ram_state_init(rs) < 0) {
zhanghailianga91246c2016-10-27 14:42:59 +08002058 return -1;
2059 }
2060 }
2061
2062 rcu_read_lock();
Juan Quintela56e93d22015-05-07 19:33:31 +02002063
2064 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
2065
2066 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
2067 qemu_put_byte(f, strlen(block->idstr));
2068 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
2069 qemu_put_be64(f, block->used_length);
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00002070 if (migrate_postcopy_ram() && block->page_size != qemu_host_page_size) {
2071 qemu_put_be64(f, block->page_size);
2072 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002073 }
2074
2075 rcu_read_unlock();
2076
2077 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
2078 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
2079
2080 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2081
2082 return 0;
2083}
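/*
 * Wire format produced above (a sketch of the field order as
 * emitted, not a normative description of the stream):
 *
 *   be64: ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE
 *   per RAMBlock:
 *     byte : strlen(idstr)
 *     bytes: idstr (no NUL terminator)
 *     be64 : used_length
 *     be64 : page_size  (only with postcopy and a non-host page size)
 *   be64: RAM_SAVE_FLAG_EOS
 */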
2084
Juan Quintela3d0684b2017-03-23 15:06:39 +01002085/**
2086 * ram_save_iterate: iterative stage for migration
2087 *
2088 * Returns zero to indicate success and negative for error
2089 *
2090 * @f: QEMUFile where to send the data
2091 * @opaque: RAMState pointer
2092 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002093static int ram_save_iterate(QEMUFile *f, void *opaque)
2094{
Juan Quintela6f37bb82017-03-13 19:26:29 +01002095 RAMState *rs = opaque;
Juan Quintela56e93d22015-05-07 19:33:31 +02002096 int ret;
2097 int i;
2098 int64_t t0;
Thomas Huth5c903082016-11-04 14:10:17 +01002099 int done = 0;
Juan Quintela56e93d22015-05-07 19:33:31 +02002100
2101 rcu_read_lock();
Juan Quintela6f37bb82017-03-13 19:26:29 +01002102 if (ram_list.version != rs->last_version) {
2103 ram_state_reset(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002104 }
2105
2106 /* Read version before ram_list.blocks */
2107 smp_rmb();
2108
2109 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
2110
2111 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
2112 i = 0;
2113 while ((ret = qemu_file_rate_limit(f)) == 0) {
2114 int pages;
2115
Juan Quintela072c2512017-03-14 10:27:31 +01002116 pages = ram_find_and_save_block(rs, f, false);
Juan Quintela56e93d22015-05-07 19:33:31 +02002117 /* no more pages to send */
2118 if (pages == 0) {
Thomas Huth5c903082016-11-04 14:10:17 +01002119 done = 1;
Juan Quintela56e93d22015-05-07 19:33:31 +02002120 break;
2121 }
Juan Quintela23b28c32017-03-13 20:51:34 +01002122 rs->iterations++;
Jason J. Herne070afca2015-09-08 13:12:35 -04002123
Juan Quintela56e93d22015-05-07 19:33:31 +02002124 /* we want to check in the 1st loop, just in case it was the 1st time
2125 and we had to sync the dirty bitmap.
 2126 qemu_get_clock_ns() is a bit expensive, so we only check every few
 2127 iterations
2128 */
2129 if ((i & 63) == 0) {
2130 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
2131 if (t1 > MAX_WAIT) {
Juan Quintela55c44462017-01-23 22:32:05 +01002132 trace_ram_save_iterate_big_wait(t1, i);
Juan Quintela56e93d22015-05-07 19:33:31 +02002133 break;
2134 }
2135 }
2136 i++;
2137 }
Juan Quintela2f4fde92017-03-13 21:58:11 +01002138 flush_compressed_data(rs, f);
Juan Quintela56e93d22015-05-07 19:33:31 +02002139 rcu_read_unlock();
2140
2141 /*
2142 * Must occur before EOS (or any QEMUFile operation)
2143 * because of RDMA protocol.
2144 */
2145 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
2146
2147 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
Juan Quintela2f4fde92017-03-13 21:58:11 +01002148 rs->bytes_transferred += 8;
Juan Quintela56e93d22015-05-07 19:33:31 +02002149
2150 ret = qemu_file_get_error(f);
2151 if (ret < 0) {
2152 return ret;
2153 }
2154
Thomas Huth5c903082016-11-04 14:10:17 +01002155 return done;
Juan Quintela56e93d22015-05-07 19:33:31 +02002156}
2157
Juan Quintela3d0684b2017-03-23 15:06:39 +01002158/**
2159 * ram_save_complete: function called to send the remaining amount of ram
2160 *
2161 * Returns zero to indicate success
2162 *
2163 * Called with iothread lock
2164 *
2165 * @f: QEMUFile where to send the data
2166 * @opaque: RAMState pointer
2167 */
Juan Quintela56e93d22015-05-07 19:33:31 +02002168static int ram_save_complete(QEMUFile *f, void *opaque)
2169{
Juan Quintela6f37bb82017-03-13 19:26:29 +01002170 RAMState *rs = opaque;
2171
Juan Quintela56e93d22015-05-07 19:33:31 +02002172 rcu_read_lock();
2173
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002174 if (!migration_in_postcopy(migrate_get_current())) {
Juan Quintela8d820d62017-03-13 19:35:50 +01002175 migration_bitmap_sync(rs);
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002176 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002177
2178 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2179
2180 /* try transferring iterative blocks of memory */
2181
2182 /* flush all remaining blocks regardless of rate limiting */
2183 while (true) {
2184 int pages;
2185
Juan Quintela072c2512017-03-14 10:27:31 +01002186 pages = ram_find_and_save_block(rs, f, !migration_in_colo_state());
Juan Quintela56e93d22015-05-07 19:33:31 +02002187 /* no more blocks to send */
2188 if (pages == 0) {
2189 break;
2190 }
2191 }
2192
Juan Quintela2f4fde92017-03-13 21:58:11 +01002193 flush_compressed_data(rs, f);
Juan Quintela56e93d22015-05-07 19:33:31 +02002194 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
Juan Quintela56e93d22015-05-07 19:33:31 +02002195
2196 rcu_read_unlock();
Paolo Bonzinid09a6fd2015-07-09 08:47:58 +02002197
Juan Quintela56e93d22015-05-07 19:33:31 +02002198 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2199
2200 return 0;
2201}
2202
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002203static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2204 uint64_t *non_postcopiable_pending,
2205 uint64_t *postcopiable_pending)
Juan Quintela56e93d22015-05-07 19:33:31 +02002206{
Juan Quintela8d820d62017-03-13 19:35:50 +01002207 RAMState *rs = opaque;
Juan Quintela56e93d22015-05-07 19:33:31 +02002208 uint64_t remaining_size;
2209
Juan Quintela9edabd42017-03-14 12:02:16 +01002210 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02002211
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00002212 if (!migration_in_postcopy(migrate_get_current()) &&
2213 remaining_size < max_size) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002214 qemu_mutex_lock_iothread();
2215 rcu_read_lock();
Juan Quintela8d820d62017-03-13 19:35:50 +01002216 migration_bitmap_sync(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02002217 rcu_read_unlock();
2218 qemu_mutex_unlock_iothread();
Juan Quintela9edabd42017-03-14 12:02:16 +01002219 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02002220 }
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00002221
2222 /* We can do postcopy, and all the data is postcopiable */
2223 *postcopiable_pending += remaining_size;
Juan Quintela56e93d22015-05-07 19:33:31 +02002224}
2225
2226static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2227{
2228 unsigned int xh_len;
2229 int xh_flags;
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002230 uint8_t *loaded_data;
Juan Quintela56e93d22015-05-07 19:33:31 +02002231
2232 if (!xbzrle_decoded_buf) {
2233 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2234 }
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002235 loaded_data = xbzrle_decoded_buf;
Juan Quintela56e93d22015-05-07 19:33:31 +02002236
2237 /* extract RLE header */
2238 xh_flags = qemu_get_byte(f);
2239 xh_len = qemu_get_be16(f);
2240
2241 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2242 error_report("Failed to load XBZRLE page - wrong compression!");
2243 return -1;
2244 }
2245
2246 if (xh_len > TARGET_PAGE_SIZE) {
2247 error_report("Failed to load XBZRLE page - len overflow!");
2248 return -1;
2249 }
2250 /* load data and decode */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002251 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002252
2253 /* decode RLE */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00002254 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
Juan Quintela56e93d22015-05-07 19:33:31 +02002255 TARGET_PAGE_SIZE) == -1) {
2256 error_report("Failed to load XBZRLE page - decode error!");
2257 return -1;
2258 }
2259
2260 return 0;
2261}
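/*
 * XBZRLE page layout as consumed above (sketch):
 *
 *   byte : xh_flags, must be ENCODING_FLAG_XBZRLE
 *   be16 : xh_len, encoded length, at most TARGET_PAGE_SIZE
 *   bytes: xh_len bytes of encoded delta, applied on top of the
 *          current contents of 'host' by xbzrle_decode_buffer()
 */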
2262
Juan Quintela3d0684b2017-03-23 15:06:39 +01002263/**
2264 * ram_block_from_stream: read a RAMBlock id from the migration stream
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002265 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002266 * Must be called from within a rcu critical section.
2267 *
2268 * Returns a pointer from within the RCU-protected ram_list.
2269 *
2270 * @f: QEMUFile where to read the data from
2271 * @flags: Page flags (mostly to see if it's a continuation of previous block)
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002272 */
Juan Quintela3d0684b2017-03-23 15:06:39 +01002273static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
Juan Quintela56e93d22015-05-07 19:33:31 +02002274{
2275 static RAMBlock *block = NULL;
2276 char id[256];
2277 uint8_t len;
2278
2279 if (flags & RAM_SAVE_FLAG_CONTINUE) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08002280 if (!block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002281 error_report("Ack, bad migration stream!");
2282 return NULL;
2283 }
zhanghailiang4c4bad42016-01-15 11:37:41 +08002284 return block;
Juan Quintela56e93d22015-05-07 19:33:31 +02002285 }
2286
2287 len = qemu_get_byte(f);
2288 qemu_get_buffer(f, (uint8_t *)id, len);
2289 id[len] = 0;
2290
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00002291 block = qemu_ram_block_by_name(id);
zhanghailiang4c4bad42016-01-15 11:37:41 +08002292 if (!block) {
2293 error_report("Can't find block %s", id);
2294 return NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002295 }
2296
zhanghailiang4c4bad42016-01-15 11:37:41 +08002297 return block;
2298}
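/*
 * Stream shapes handled above, after the caller has consumed the
 * addr|flags word: a full header carries a one-byte id length
 * followed by the id string, while RAM_SAVE_FLAG_CONTINUE omits both
 * and means "same RAMBlock as the previous page", remembered in the
 * static 'block'.
 */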
2299
2300static inline void *host_from_ram_block_offset(RAMBlock *block,
2301 ram_addr_t offset)
2302{
2303 if (!offset_in_ramblock(block, offset)) {
2304 return NULL;
2305 }
2306
2307 return block->host + offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02002308}
2309
Juan Quintela3d0684b2017-03-23 15:06:39 +01002310/**
2311 * ram_handle_compressed: handle the zero page case
2312 *
Juan Quintela56e93d22015-05-07 19:33:31 +02002313 * If a page (or a whole RDMA chunk) has been
2314 * determined to be zero, then zap it.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002315 *
2316 * @host: host address for the zero page
2317 * @ch: what the page is filled from. We only support zero
2318 * @size: size of the zero page
Juan Quintela56e93d22015-05-07 19:33:31 +02002319 */
2320void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
2321{
2322 if (ch != 0 || !is_zero_range(host, size)) {
2323 memset(host, ch, size);
2324 }
2325}
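/*
 * Note the double test above: for a zero page (ch == 0) that is
 * already zero on the destination the memset() is skipped entirely,
 * so freshly allocated guest RAM is never faulted in just to be
 * filled with zeroes.
 */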
2326
2327static void *do_data_decompress(void *opaque)
2328{
2329 DecompressParam *param = opaque;
2330 unsigned long pagesize;
Liang Li33d151f2016-05-05 15:32:58 +08002331 uint8_t *des;
2332 int len;
Juan Quintela56e93d22015-05-07 19:33:31 +02002333
Liang Li33d151f2016-05-05 15:32:58 +08002334 qemu_mutex_lock(&param->mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002335 while (!param->quit) {
Liang Li33d151f2016-05-05 15:32:58 +08002336 if (param->des) {
2337 des = param->des;
2338 len = param->len;
2339 param->des = 0;
2340 qemu_mutex_unlock(&param->mutex);
2341
Liang Li73a89122016-05-05 15:32:51 +08002342 pagesize = TARGET_PAGE_SIZE;
 2343 /* uncompress() can fail in some cases, especially
 2344 * when the page was dirtied while being compressed; it's
2345 * not a problem because the dirty page will be retransferred
2346 * and uncompress() won't break the data in other pages.
2347 */
Liang Li33d151f2016-05-05 15:32:58 +08002348 uncompress((Bytef *)des, &pagesize,
2349 (const Bytef *)param->compbuf, len);
Liang Li73a89122016-05-05 15:32:51 +08002350
Liang Li33d151f2016-05-05 15:32:58 +08002351 qemu_mutex_lock(&decomp_done_lock);
2352 param->done = true;
2353 qemu_cond_signal(&decomp_done_cond);
2354 qemu_mutex_unlock(&decomp_done_lock);
2355
2356 qemu_mutex_lock(&param->mutex);
2357 } else {
2358 qemu_cond_wait(&param->cond, &param->mutex);
2359 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002360 }
Liang Li33d151f2016-05-05 15:32:58 +08002361 qemu_mutex_unlock(&param->mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002362
2363 return NULL;
2364}
2365
Liang Li5533b2e2016-05-05 15:32:52 +08002366static void wait_for_decompress_done(void)
2367{
2368 int idx, thread_count;
2369
2370 if (!migrate_use_compression()) {
2371 return;
2372 }
2373
2374 thread_count = migrate_decompress_threads();
2375 qemu_mutex_lock(&decomp_done_lock);
2376 for (idx = 0; idx < thread_count; idx++) {
2377 while (!decomp_param[idx].done) {
2378 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2379 }
2380 }
2381 qemu_mutex_unlock(&decomp_done_lock);
2382}
2383
Juan Quintela56e93d22015-05-07 19:33:31 +02002384void migrate_decompress_threads_create(void)
2385{
2386 int i, thread_count;
2387
2388 thread_count = migrate_decompress_threads();
2389 decompress_threads = g_new0(QemuThread, thread_count);
2390 decomp_param = g_new0(DecompressParam, thread_count);
Liang Li73a89122016-05-05 15:32:51 +08002391 qemu_mutex_init(&decomp_done_lock);
2392 qemu_cond_init(&decomp_done_cond);
Juan Quintela56e93d22015-05-07 19:33:31 +02002393 for (i = 0; i < thread_count; i++) {
2394 qemu_mutex_init(&decomp_param[i].mutex);
2395 qemu_cond_init(&decomp_param[i].cond);
2396 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
Liang Li73a89122016-05-05 15:32:51 +08002397 decomp_param[i].done = true;
Liang Li90e56fb2016-05-05 15:32:56 +08002398 decomp_param[i].quit = false;
Juan Quintela56e93d22015-05-07 19:33:31 +02002399 qemu_thread_create(decompress_threads + i, "decompress",
2400 do_data_decompress, decomp_param + i,
2401 QEMU_THREAD_JOINABLE);
2402 }
2403}
2404
2405void migrate_decompress_threads_join(void)
2406{
2407 int i, thread_count;
2408
Juan Quintela56e93d22015-05-07 19:33:31 +02002409 thread_count = migrate_decompress_threads();
2410 for (i = 0; i < thread_count; i++) {
2411 qemu_mutex_lock(&decomp_param[i].mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002412 decomp_param[i].quit = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02002413 qemu_cond_signal(&decomp_param[i].cond);
2414 qemu_mutex_unlock(&decomp_param[i].mutex);
2415 }
2416 for (i = 0; i < thread_count; i++) {
2417 qemu_thread_join(decompress_threads + i);
2418 qemu_mutex_destroy(&decomp_param[i].mutex);
2419 qemu_cond_destroy(&decomp_param[i].cond);
2420 g_free(decomp_param[i].compbuf);
2421 }
2422 g_free(decompress_threads);
2423 g_free(decomp_param);
Juan Quintela56e93d22015-05-07 19:33:31 +02002424 decompress_threads = NULL;
2425 decomp_param = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002426}
2427
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00002428static void decompress_data_with_multi_threads(QEMUFile *f,
Juan Quintela56e93d22015-05-07 19:33:31 +02002429 void *host, int len)
2430{
2431 int idx, thread_count;
2432
2433 thread_count = migrate_decompress_threads();
Liang Li73a89122016-05-05 15:32:51 +08002434 qemu_mutex_lock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002435 while (true) {
2436 for (idx = 0; idx < thread_count; idx++) {
Liang Li73a89122016-05-05 15:32:51 +08002437 if (decomp_param[idx].done) {
Liang Li33d151f2016-05-05 15:32:58 +08002438 decomp_param[idx].done = false;
2439 qemu_mutex_lock(&decomp_param[idx].mutex);
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00002440 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002441 decomp_param[idx].des = host;
2442 decomp_param[idx].len = len;
Liang Li33d151f2016-05-05 15:32:58 +08002443 qemu_cond_signal(&decomp_param[idx].cond);
2444 qemu_mutex_unlock(&decomp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002445 break;
2446 }
2447 }
2448 if (idx < thread_count) {
2449 break;
Liang Li73a89122016-05-05 15:32:51 +08002450 } else {
2451 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002452 }
2453 }
Liang Li73a89122016-05-05 15:32:51 +08002454 qemu_mutex_unlock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002455}
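/*
 * Handshake with do_data_decompress(), in brief: 'done' (guarded by
 * decomp_done_lock) flows consumer->producer and means "this worker
 * is free", while 'des'/'len' (guarded by the per-thread mutex) flow
 * producer->consumer and mean "here is a page to decompress".  When
 * every worker is busy, the caller sleeps on decomp_done_cond until
 * one of them signals completion.
 */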
2456
Juan Quintela3d0684b2017-03-23 15:06:39 +01002457/**
2458 * ram_postcopy_incoming_init: allocate postcopy data structures
2459 *
 2460 * Returns 0 for success and negative if there was an error
2461 *
2462 * @mis: current migration incoming state
2463 *
2464 * Allocate data structures etc needed by incoming migration with
 2465 * postcopy-ram. postcopy-ram's similarly named
2466 * postcopy_ram_incoming_init does the work.
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00002467 */
2468int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2469{
2470 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2471
2472 return postcopy_ram_incoming_init(mis, ram_pages);
2473}
2474
Juan Quintela3d0684b2017-03-23 15:06:39 +01002475/**
2476 * ram_load_postcopy: load a page in postcopy case
2477 *
2478 * Returns 0 for success or -errno in case of error
2479 *
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002480 * Called in postcopy mode by ram_load().
2481 * rcu_read_lock is taken prior to this being called.
Juan Quintela3d0684b2017-03-23 15:06:39 +01002482 *
2483 * @f: QEMUFile where to send the data
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002484 */
2485static int ram_load_postcopy(QEMUFile *f)
2486{
2487 int flags = 0, ret = 0;
2488 bool place_needed = false;
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00002489 bool matching_page_sizes = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002490 MigrationIncomingState *mis = migration_incoming_get_current();
2491 /* Temporary page that is later 'placed' */
2492 void *postcopy_host_page = postcopy_get_tmp_page(mis);
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00002493 void *last_host = NULL;
Dr. David Alan Gilberta3b6ff62015-11-11 14:02:28 +00002494 bool all_zero = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00002495
    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        RAMBlock *block = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
            block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            matching_page_sizes = block->page_size == TARGET_PAGE_SIZE;
            /*
             * Postcopy requires that we place whole host pages atomically;
             * these may be huge pages for RAMBlocks that are backed by
             * hugetlbfs.
             * To make it atomic, the data is read into a temporary page
             * that's moved into place later.
             * The migration protocol uses, possibly smaller, target pages,
             * but the source ensures it always sends all the components
             * of a host page in order.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & (block->page_size - 1));
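            /*
             * Worked example with illustrative numbers (not from the
             * original file): for a 2 MiB hugetlbfs host page and 4 KiB
             * target pages, the 512 target pages of one host page land at
             * offsets 0x0000, 0x1000, ..., 0x1ff000 inside
             * postcopy_host_page, and the completed page is only 'placed'
             * after the last of them has been read.
             */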
            /* If all target pages are zero then we can optimise the place */
            if (!((uintptr_t)host & (block->page_size - 1))) {
                all_zero = true;
            } else {
                /* not the 1st target page within the host page */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                 host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If it's the last part of a host page then we place the host
             * page
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                            (block->page_size - 1)) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!place_needed || !matching_page_sizes) {
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * Avoid the copy out of the qemu_file buffer: the place
                 * operation below copies the data anyway, and reading in
                 * place is only safe when the whole host page is read in
                 * one go (matching page sizes).
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
        }

        if (place_needed) {
            /* This gets called at the last target page in the host page */
            void *place_dest = host + TARGET_PAGE_SIZE - block->page_size;

            if (all_zero) {
                ret = postcopy_place_page_zero(mis, place_dest,
                                               block->page_size);
            } else {
                ret = postcopy_place_page(mis, place_dest,
                                          place_source, block->page_size);
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}
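
/*
 * For reference, a sketch of how the load loops in this file decode the
 * per-page header word (an illustrative helper, not a function from the
 * original file):
 */
#if 0   /* illustrative only */
static void decode_page_header_sketch(QEMUFile *f, ram_addr_t *addr,
                                      int *flags)
{
    uint64_t header = qemu_get_be64(f);

    /* The bits below the target-page boundary carry RAM_SAVE_FLAG_* */
    *flags = header & ~TARGET_PAGE_MASK;
    /* The page-aligned remainder is the offset within the RAMBlock */
    *addr = header & TARGET_PAGE_MASK;
}
#endif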

static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;
    /*
     * If the system is running in postcopy mode, page inserts to host memory
     * must be atomic.
     */
    bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;
    /* ADVISE is earlier; it shows that the source has the postcopy capability on */
    bool postcopy_advised = postcopy_state_get() >= POSTCOPY_INCOMING_ADVISE;

    seq_iter++;

    if (version_id != 4) {
        ret = -EINVAL;
    }

    /*
     * This RCU critical section can be very long running.
     * If RCU reclaims in this code start to become numerous,
     * it will be necessary to reduce the granularity of this
     * critical section.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Synchronize RAM block list */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block) {
                    if (length != block->used_length) {
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    /* For postcopy we need to check hugepage sizes match */
                    if (postcopy_advised &&
                        block->page_size != qemu_host_page_size) {
                        uint64_t remote_page_size = qemu_get_be64(f);
                        if (remote_page_size != block->page_size) {
                            error_report("Mismatched RAM page size %s "
                                         "(local) %zu != %" PRIu64,
                                         id, block->page_size,
                                         remote_page_size);
                            ret = -EINVAL;
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_COMPRESS:
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
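            /*
             * Illustrative note: zlib's compressBound(n) is n plus a small
             * worst-case expansion margin, so for a 4 KiB target page the
             * accepted length is only slightly above 4096 bytes (the exact
             * bound depends on the zlib version).
             */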
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* normal exit */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    wait_for_decompress_done();
    rcu_read_unlock();
    trace_ram_load_complete(ret, seq_iter);
    return ret;
}
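
/*
 * For reference, the per-block record layout consumed by the
 * RAM_SAVE_FLAG_MEM_SIZE case above, reconstructed from the parsing code
 * (the bracketed field is only read when the stream is postcopy-advised
 * and the block's page size differs from the host page size):
 *
 *   u8         length of the block idstr (len)
 *   len * u8   idstr bytes (not NUL-terminated on the wire)
 *   be64       used length of the block in bytes
 *   [be64      page size of the block, e.g. 2 MiB for hugetlbfs backing]
 */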

static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cleanup = ram_migration_cleanup,
};

void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, &ram_state);
}
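
/*
 * Note: the version number 4 passed to register_savevm_live() must stay in
 * sync with the 'version_id != 4' check at the top of ram_load() above.
 */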