/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 * Copyright (c) 2011-2015 Red Hat Inc
 *
 * Authors:
 *  Juan Quintela <quintela@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu/osdep.h"
#include "cpu.h"
#include <zlib.h>
#include "qemu/cutils.h"
#include "qemu/bitops.h"
#include "qemu/bitmap.h"
#include "qemu/main-loop.h"
#include "xbzrle.h"
#include "ram.h"
#include "migration.h"
#include "socket.h"
#include "migration/register.h"
#include "migration/misc.h"
#include "qemu-file.h"
#include "postcopy-ram.h"
#include "page_cache.h"
#include "qemu/error-report.h"
#include "qapi/error.h"
#include "qapi/qapi-events-migration.h"
#include "qapi/qmp/qerror.h"
#include "trace.h"
#include "exec/ram_addr.h"
#include "exec/target_page.h"
#include "qemu/rcu_queue.h"
#include "migration/colo.h"
#include "block.h"
#include "sysemu/sysemu.h"
#include "qemu/uuid.h"
#include "savevm.h"
#include "qemu/iov.h"

/***********************************************************/
/* ram save/restore */

/* RAM_SAVE_FLAG_ZERO used to be named RAM_SAVE_FLAG_COMPRESS, it
 * worked for pages that were filled with the same char.  We switched
 * it to only search for the zero value.  And to avoid confusion with
 * RAM_SAVE_FLAG_COMPRESS_PAGE just rename it.
 */

#define RAM_SAVE_FLAG_FULL     0x01 /* Obsolete, not used anymore */
#define RAM_SAVE_FLAG_ZERO     0x02
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08
#define RAM_SAVE_FLAG_EOS      0x10
#define RAM_SAVE_FLAG_CONTINUE 0x20
#define RAM_SAVE_FLAG_XBZRLE   0x40
/* 0x80 is reserved in migration.h start with 0x100 next */
#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100

static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

XBZRLECacheStats xbzrle_counters;

/* struct contains XBZRLE cache and a static page
   used by the compression */
static struct {
    /* buffer used for XBZRLE encoding */
    uint8_t *encoded_buf;
    /* buffer for storing page content */
    uint8_t *current_buf;
    /* Cache for XBZRLE, Protected by lock. */
    PageCache *cache;
    QemuMutex lock;
    /* it will store a page full of zeros */
    uint8_t *zero_target_page;
    /* buffer used for XBZRLE decoding */
    uint8_t *decoded_buf;
} XBZRLE;

static void XBZRLE_cache_lock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_lock(&XBZRLE.lock);
}

static void XBZRLE_cache_unlock(void)
{
    if (migrate_use_xbzrle())
        qemu_mutex_unlock(&XBZRLE.lock);
}

/**
 * xbzrle_cache_resize: resize the xbzrle cache
 *
 * This function is called from qmp_migrate_set_cache_size in the main
 * thread, possibly while a migration is in progress.  A running
 * migration may be using the cache and might finish during this call,
 * hence changes to the cache are protected by XBZRLE.lock.
 *
 * Returns 0 for success or -1 for error
 *
 * @new_size: new cache size
 * @errp: set *errp if the check failed, with reason
 */
int xbzrle_cache_resize(int64_t new_size, Error **errp)
{
    PageCache *new_cache;
    int64_t ret = 0;

    /* Check for truncation */
    if (new_size != (size_t)new_size) {
        error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "cache size",
                   "exceeding address space");
        return -1;
    }

    if (new_size == migrate_xbzrle_cache_size()) {
        /* nothing to do */
        return 0;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        new_cache = cache_init(new_size, TARGET_PAGE_SIZE, errp);
        if (!new_cache) {
            ret = -1;
            goto out;
        }

        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }
out:
    XBZRLE_cache_unlock();
    return ret;
}

static bool ramblock_is_ignored(RAMBlock *block)
{
    return !qemu_ram_is_migratable(block) ||
           (migrate_ignore_shared() && qemu_ram_is_shared(block));
}

/* Should be holding either ram_list.mutex, or the RCU lock. */
#define RAMBLOCK_FOREACH_NOT_IGNORED(block)            \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (ramblock_is_ignored(block)) {} else

#define RAMBLOCK_FOREACH_MIGRATABLE(block)             \
    INTERNAL_RAMBLOCK_FOREACH(block)                   \
        if (!qemu_ram_is_migratable(block)) {} else

#undef RAMBLOCK_FOREACH

int foreach_not_ignored_block(RAMBlockIterFunc func, void *opaque)
{
    RAMBlock *block;
    int ret = 0;

    RCU_READ_LOCK_GUARD();

    RAMBLOCK_FOREACH_NOT_IGNORED(block) {
        ret = func(block, opaque);
        if (ret) {
            break;
        }
    }
    return ret;
}

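/*
 * Allocate the receivedmap bitmap for every RAMBlock that is not
 * ignored for migration.  Each bit tracks one target page received
 * on the destination side.
 */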
static void ramblock_recv_map_init(void)
{
    RAMBlock *rb;

    RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
        assert(!rb->receivedmap);
        rb->receivedmap = bitmap_new(rb->max_length >> qemu_target_page_bits());
    }
}

int ramblock_recv_bitmap_test(RAMBlock *rb, void *host_addr)
{
    return test_bit(ramblock_recv_bitmap_offset(host_addr, rb),
                    rb->receivedmap);
}

bool ramblock_recv_bitmap_test_byte_offset(RAMBlock *rb, uint64_t byte_offset)
{
    return test_bit(byte_offset >> TARGET_PAGE_BITS, rb->receivedmap);
}

void ramblock_recv_bitmap_set(RAMBlock *rb, void *host_addr)
{
    set_bit_atomic(ramblock_recv_bitmap_offset(host_addr, rb), rb->receivedmap);
}

void ramblock_recv_bitmap_set_range(RAMBlock *rb, void *host_addr,
                                    size_t nr)
{
    bitmap_set_atomic(rb->receivedmap,
                      ramblock_recv_bitmap_offset(host_addr, rb),
                      nr);
}

#define RAMBLOCK_RECV_BITMAP_ENDING  (0x0123456789abcdefULL)

/*
 * Format: bitmap_size (8 bytes) + whole_bitmap (N bytes).
 *
 * Returns >0 if success with sent bytes, or <0 if error.
 */
int64_t ramblock_recv_bitmap_send(QEMUFile *file,
                                  const char *block_name)
{
    RAMBlock *block = qemu_ram_block_by_name(block_name);
    unsigned long *le_bitmap, nbits;
    uint64_t size;

    if (!block) {
        error_report("%s: invalid block name: %s", __func__, block_name);
        return -1;
    }

    nbits = block->used_length >> TARGET_PAGE_BITS;

    /*
     * Make sure the tmp bitmap buffer is big enough, e.g., on 32bit
     * machines we may need 4 more bytes for padding (see below
     * comment).  So extend it a bit beforehand.
     */
    le_bitmap = bitmap_new(nbits + BITS_PER_LONG);

    /*
     * Always use little endian when sending the bitmap.  This is
     * required when the source and destination VMs are not using the
     * same endianness.  (Note: big endian won't work.)
     */
    bitmap_to_le(le_bitmap, block->receivedmap, nbits);

    /* Size of the bitmap, in bytes */
    size = DIV_ROUND_UP(nbits, 8);

    /*
     * size is always aligned to 8 bytes for 64bit machines, but it
     * may not be true for 32bit machines. We need this padding to
     * make sure the migration can survive even between 32bit and
     * 64bit machines.
     */
    size = ROUND_UP(size, 8);

    qemu_put_be64(file, size);
    qemu_put_buffer(file, (const uint8_t *)le_bitmap, size);
    /*
     * Mark as an end, in case the middle part is screwed up due to
     * some "mysterious" reason.
     */
    qemu_put_be64(file, RAMBLOCK_RECV_BITMAP_ENDING);
    qemu_fflush(file);

    g_free(le_bitmap);

    if (qemu_file_get_error(file)) {
        return qemu_file_get_error(file);
    }

    return size + sizeof(size);
}

/*
 * An outstanding page request, on the source, having been received
 * and queued
 */
struct RAMSrcPageRequest {
    RAMBlock *rb;
    hwaddr    offset;
    hwaddr    len;

    QSIMPLEQ_ENTRY(RAMSrcPageRequest) next_req;
};

/* State of RAM for migration */
struct RAMState {
    /* QEMUFile used for this migration */
    QEMUFile *f;
    /* Last block that we have visited searching for dirty pages */
    RAMBlock *last_seen_block;
    /* Last block from where we have sent data */
    RAMBlock *last_sent_block;
    /* Last dirty target page we have sent */
    ram_addr_t last_page;
    /* last ram version we have seen */
    uint32_t last_version;
    /* We are in the first round */
    bool ram_bulk_stage;
    /* The free page optimization is enabled */
    bool fpo_enabled;
    /* How many times we have dirty too many pages */
    int dirty_rate_high_cnt;
    /* these variables are used for bitmap sync */
    /* last time we did a full bitmap_sync */
    int64_t time_last_bitmap_sync;
    /* bytes transferred at start_time */
    uint64_t bytes_xfer_prev;
    /* number of dirty pages since start_time */
    uint64_t num_dirty_pages_period;
    /* xbzrle misses since the beginning of the period */
    uint64_t xbzrle_cache_miss_prev;

    /* compression statistics since the beginning of the period */
    /* number of times there was no free thread to compress data */
    uint64_t compress_thread_busy_prev;
    /* amount of bytes after compression */
    uint64_t compressed_size_prev;
    /* amount of compressed pages */
    uint64_t compress_pages_prev;

    /* total handled target pages at the beginning of period */
    uint64_t target_page_count_prev;
    /* total handled target pages since start */
    uint64_t target_page_count;
    /* number of dirty bits in the bitmap */
    uint64_t migration_dirty_pages;
    /* Protects modification of the bitmap and migration dirty pages */
    QemuMutex bitmap_mutex;
    /* The RAMBlock used in the last src_page_requests */
    RAMBlock *last_req_rb;
    /* Queue of outstanding page requests from the destination */
    QemuMutex src_page_req_mutex;
    QSIMPLEQ_HEAD(, RAMSrcPageRequest) src_page_requests;
};
typedef struct RAMState RAMState;

static RAMState *ram_state;

static NotifierWithReturnList precopy_notifier_list;

void precopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&precopy_notifier_list);
}

void precopy_add_notifier(NotifierWithReturn *n)
{
    notifier_with_return_list_add(&precopy_notifier_list, n);
}

void precopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

int precopy_notify(PrecopyNotifyReason reason, Error **errp)
{
    PrecopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&precopy_notifier_list, &pnd);
}

void precopy_enable_free_page_optimization(void)
{
    if (!ram_state) {
        return;
    }

    ram_state->fpo_enabled = true;
}

uint64_t ram_bytes_remaining(void)
{
    return ram_state ? (ram_state->migration_dirty_pages * TARGET_PAGE_SIZE) :
                       0;
}

MigrationStats ram_counters;

/* used by the search for pages to send */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current page to search from */
    unsigned long page;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

CompressionStats compression_counters;

struct CompressParam {
    bool done;
    bool quit;
    bool zero_page;
    QEMUFile *file;
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;
    ram_addr_t offset;

    /* internally used fields */
    z_stream stream;
    uint8_t *originbuf;
};
typedef struct CompressParam CompressParam;

struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;
    uint8_t *compbuf;
    int len;
    z_stream stream;
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;
/* comp_done_cond is used to wake up the migration thread when
 * one of the compression threads has finished the compression.
 * comp_done_lock is used to co-work with comp_done_cond.
 */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;
/* The empty QEMUFileOps will be used by file in CompressParam */
static const QEMUFileOps empty_ops = { };

static QEMUFile *decomp_file;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
                                 ram_addr_t offset, uint8_t *source_buf);

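/*
 * Worker loop of one compression thread: wait until the migration
 * thread posts a (block, offset) pair in our CompressParam, compress
 * that page into param->file via do_compress_ram_page(), then mark
 * ourselves done and signal comp_done_cond.  The loop exits once
 * param->quit is set.
 */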
static void *do_data_compress(void *opaque)
{
    CompressParam *param = opaque;
    RAMBlock *block;
    ram_addr_t offset;
    bool zero_page;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->block) {
            block = param->block;
            offset = param->offset;
            param->block = NULL;
            qemu_mutex_unlock(&param->mutex);

            zero_page = do_compress_ram_page(param->file, &param->stream,
                                             block, offset, param->originbuf);

            qemu_mutex_lock(&comp_done_lock);
            param->done = true;
            param->zero_page = zero_page;
            qemu_cond_signal(&comp_done_cond);
            qemu_mutex_unlock(&comp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}

static void compress_threads_save_cleanup(void)
{
    int i, thread_count;

    if (!migrate_use_compression() || !comp_param) {
        return;
    }

    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        /*
         * we use it as an indicator which shows if the thread is
         * properly init'd or not
         */
        if (!comp_param[i].file) {
            break;
        }

        qemu_mutex_lock(&comp_param[i].mutex);
        comp_param[i].quit = true;
        qemu_cond_signal(&comp_param[i].cond);
        qemu_mutex_unlock(&comp_param[i].mutex);

        qemu_thread_join(compress_threads + i);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
        deflateEnd(&comp_param[i].stream);
        g_free(comp_param[i].originbuf);
        qemu_fclose(comp_param[i].file);
        comp_param[i].file = NULL;
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}

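/*
 * Set up the compression threads on the source side: allocate one
 * CompressParam (with its own z_stream, scratch buffer and dummy
 * QEMUFile) per configured thread and start the worker threads.
 * Returns 0 on success, -1 on failure (with everything cleaned up).
 */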
static int compress_threads_save_setup(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return 0;
    }
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        comp_param[i].originbuf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!comp_param[i].originbuf) {
            goto exit;
        }

        if (deflateInit(&comp_param[i].stream,
                        migrate_compress_level()) != Z_OK) {
            g_free(comp_param[i].originbuf);
            goto exit;
        }

        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
    return 0;

exit:
    compress_threads_save_cleanup();
    return -1;
}

/* Multiple fd's */

#define MULTIFD_MAGIC 0x11223344U
#define MULTIFD_VERSION 1

#define MULTIFD_FLAG_SYNC (1 << 0)

/* This value needs to be a multiple of qemu_target_page_size() */
#define MULTIFD_PACKET_SIZE (512 * 1024)

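/*
 * Rough layout of a multifd channel stream as produced by the send
 * side below: one MultiFDInit_t handshake message when the channel
 * is created, followed by a sequence of MultiFDPacket_t headers,
 * each immediately followed by the raw data of the pages it
 * describes (next_packet_size bytes).  All on-wire integer fields
 * are big endian.
 */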
typedef struct {
    uint32_t magic;
    uint32_t version;
    unsigned char uuid[16]; /* QemuUUID */
    uint8_t id;
    uint8_t unused1[7];     /* Reserved for future use */
    uint64_t unused2[4];    /* Reserved for future use */
} __attribute__((packed)) MultiFDInit_t;

typedef struct {
    uint32_t magic;
    uint32_t version;
    uint32_t flags;
    /* maximum number of allocated pages */
    uint32_t pages_alloc;
    uint32_t pages_used;
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    uint64_t packet_num;
    uint64_t unused[4];    /* Reserved for future use */
    char ramblock[256];
    uint64_t offset[];
} __attribute__((packed)) MultiFDPacket_t;

typedef struct {
    /* number of used pages */
    uint32_t used;
    /* number of allocated pages */
    uint32_t allocated;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* offset of each page */
    ram_addr_t *offset;
    /* pointer to each page */
    struct iovec *iov;
    RAMBlock *block;
} MultiFDPages_t;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* sem where to wait for more work */
    QemuSemaphore sem;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* thread has work to do */
    int pending_job;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDSendParams;

typedef struct {
    /* these fields are not changed once the thread is created */
    /* channel number */
    uint8_t id;
    /* channel thread name */
    char *name;
    /* channel thread id */
    QemuThread thread;
    /* communication channel */
    QIOChannel *c;
    /* this mutex protects the following parameters */
    QemuMutex mutex;
    /* is this channel thread running */
    bool running;
    /* should this thread finish */
    bool quit;
    /* array of pages to receive */
    MultiFDPages_t *pages;
    /* packet allocated len */
    uint32_t packet_len;
    /* pointer to the packet */
    MultiFDPacket_t *packet;
    /* multifd flags for each packet */
    uint32_t flags;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* thread local variables */
    /* size of the next packet that contains pages */
    uint32_t next_packet_size;
    /* packets sent through this channel */
    uint64_t num_packets;
    /* pages sent through this channel */
    uint64_t num_pages;
    /* syncs main thread and channels */
    QemuSemaphore sem_sync;
} MultiFDRecvParams;

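/*
 * Send the per-channel handshake message: magic, version, channel id
 * and the source VM uuid, so that the destination can match the
 * incoming connection to the right channel and reject a mismatched
 * peer.
 */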
static int multifd_send_initial_packet(MultiFDSendParams *p, Error **errp)
{
    MultiFDInit_t msg = {};
    int ret;

    msg.magic = cpu_to_be32(MULTIFD_MAGIC);
    msg.version = cpu_to_be32(MULTIFD_VERSION);
    msg.id = p->id;
    memcpy(msg.uuid, &qemu_uuid.data, sizeof(msg.uuid));

    ret = qio_channel_write_all(p->c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }
    return 0;
}

static int multifd_recv_initial_packet(QIOChannel *c, Error **errp)
{
    MultiFDInit_t msg;
    int ret;

    ret = qio_channel_read_all(c, (char *)&msg, sizeof(msg), errp);
    if (ret != 0) {
        return -1;
    }

    msg.magic = be32_to_cpu(msg.magic);
    msg.version = be32_to_cpu(msg.version);

    if (msg.magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet magic %x "
                   "expected %x", msg.magic, MULTIFD_MAGIC);
        return -1;
    }

    if (msg.version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet version %d "
                   "expected %d", msg.version, MULTIFD_VERSION);
        return -1;
    }

    if (memcmp(msg.uuid, &qemu_uuid, sizeof(qemu_uuid))) {
        char *uuid = qemu_uuid_unparse_strdup(&qemu_uuid);
        char *msg_uuid = qemu_uuid_unparse_strdup((const QemuUUID *)msg.uuid);

        error_setg(errp, "multifd: received uuid '%s' and expected "
                   "uuid '%s' for channel %hhd", msg_uuid, uuid, msg.id);
        g_free(uuid);
        g_free(msg_uuid);
        return -1;
    }

    if (msg.id > migrate_multifd_channels()) {
        error_setg(errp, "multifd: received channel id %d is greater "
                   "than number of channels %d", msg.id,
                   migrate_multifd_channels());
        return -1;
    }

    return msg.id;
}

static MultiFDPages_t *multifd_pages_init(size_t size)
{
    MultiFDPages_t *pages = g_new0(MultiFDPages_t, 1);

    pages->allocated = size;
    pages->iov = g_new0(struct iovec, size);
    pages->offset = g_new0(ram_addr_t, size);

    return pages;
}

static void multifd_pages_clear(MultiFDPages_t *pages)
{
    pages->used = 0;
    pages->allocated = 0;
    pages->packet_num = 0;
    pages->block = NULL;
    g_free(pages->iov);
    pages->iov = NULL;
    g_free(pages->offset);
    pages->offset = NULL;
    g_free(pages);
}

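/*
 * Fill the channel's packet header from its current pages array,
 * converting every field to big endian for the wire.
 */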
static void multifd_send_fill_packet(MultiFDSendParams *p)
{
    MultiFDPacket_t *packet = p->packet;
    int i;

    packet->flags = cpu_to_be32(p->flags);
    packet->pages_alloc = cpu_to_be32(p->pages->allocated);
    packet->pages_used = cpu_to_be32(p->pages->used);
    packet->next_packet_size = cpu_to_be32(p->next_packet_size);
    packet->packet_num = cpu_to_be64(p->packet_num);

    if (p->pages->block) {
        strncpy(packet->ramblock, p->pages->block->idstr, 256);
    }

    for (i = 0; i < p->pages->used; i++) {
        packet->offset[i] = cpu_to_be64(p->pages->offset[i]);
    }
}

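/*
 * Validate a received packet header (magic, version, sizes and
 * ramblock name) and translate its page offsets into the iovec that
 * the receiving thread will read the page data into.  Returns 0 on
 * success, or -1 with @errp set.
 */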
static int multifd_recv_unfill_packet(MultiFDRecvParams *p, Error **errp)
{
    MultiFDPacket_t *packet = p->packet;
    uint32_t pages_max = MULTIFD_PACKET_SIZE / qemu_target_page_size();
    RAMBlock *block;
    int i;

    packet->magic = be32_to_cpu(packet->magic);
    if (packet->magic != MULTIFD_MAGIC) {
        error_setg(errp, "multifd: received packet "
                   "magic %x and expected magic %x",
                   packet->magic, MULTIFD_MAGIC);
        return -1;
    }

    packet->version = be32_to_cpu(packet->version);
    if (packet->version != MULTIFD_VERSION) {
        error_setg(errp, "multifd: received packet "
                   "version %d and expected version %d",
                   packet->version, MULTIFD_VERSION);
        return -1;
    }

    p->flags = be32_to_cpu(packet->flags);

    packet->pages_alloc = be32_to_cpu(packet->pages_alloc);
    /*
     * If we received a packet that is 100 times bigger than expected
     * just stop migration.  It is a magic number.
     */
    if (packet->pages_alloc > pages_max * 100) {
        error_setg(errp, "multifd: received packet "
                   "with size %d and expected a maximum size of %d",
                   packet->pages_alloc, pages_max * 100);
        return -1;
    }
    /*
     * We received a packet that is bigger than expected but inside
     * reasonable limits (see previous comment).  Just reallocate.
     */
    if (packet->pages_alloc > p->pages->allocated) {
        multifd_pages_clear(p->pages);
        p->pages = multifd_pages_init(packet->pages_alloc);
    }

    p->pages->used = be32_to_cpu(packet->pages_used);
    if (p->pages->used > packet->pages_alloc) {
        error_setg(errp, "multifd: received packet "
                   "with %d pages and expected maximum pages are %d",
                   p->pages->used, packet->pages_alloc);
        return -1;
    }

    p->next_packet_size = be32_to_cpu(packet->next_packet_size);
    p->packet_num = be64_to_cpu(packet->packet_num);

    if (p->pages->used == 0) {
        return 0;
    }

    /* make sure that ramblock is 0 terminated */
    packet->ramblock[255] = 0;
    block = qemu_ram_block_by_name(packet->ramblock);
    if (!block) {
        error_setg(errp, "multifd: unknown ram block %s",
                   packet->ramblock);
        return -1;
    }

    for (i = 0; i < p->pages->used; i++) {
        ram_addr_t offset = be64_to_cpu(packet->offset[i]);

        if (offset > (block->used_length - TARGET_PAGE_SIZE)) {
            error_setg(errp, "multifd: offset too long " RAM_ADDR_FMT
                       " (max " RAM_ADDR_FMT ")",
                       offset, block->max_length);
            return -1;
        }
        p->pages->iov[i].iov_base = block->host + offset;
        p->pages->iov[i].iov_len = TARGET_PAGE_SIZE;
    }

    return 0;
}

struct {
    MultiFDSendParams *params;
    /* array of pages to send */
    MultiFDPages_t *pages;
    /* global number of generated multifd packets */
    uint64_t packet_num;
    /* send channels ready */
    QemuSemaphore channels_ready;
    /*
     * Have we already terminated the threads?  There is a race if an
     * error arrives while we are already exiting.
     * We use atomic operations.  The only valid values are 0 and 1.
     */
    int exiting;
} *multifd_send_state;

/*
 * How do we use multifd_send_state->pages and channel->pages?
 *
 * We create a pages array for each channel, plus a main one.  Each
 * time we need to send a batch of pages we swap the one in
 * multifd_send_state with the one of the channel that is sending it.
 * There are two reasons for that:
 *    - to avoid doing so many mallocs during migration
 *    - to make it easier to know what to free at the end of migration
 *
 * This way we always know who owns each "pages" struct, and we don't
 * need any locking.  It belongs either to the migration thread or to
 * the channel thread.  Switching is safe because the migration thread
 * holds the channel mutex when changing it, and the channel thread
 * must have finished with its own copy, otherwise pending_job could
 * not be false.
 */

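/*
 * Hand the currently filled pages array over to an idle channel:
 * pick the next channel without a pending job (round robin), swap
 * its pages array with multifd_send_state->pages, account for the
 * bytes that will be transferred and kick the channel thread.
 * Returns 1 on success, -1 if we are exiting or the channel has
 * already quit.
 */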
static int multifd_send_pages(RAMState *rs)
{
    int i;
    static int next_channel;
    MultiFDSendParams *p = NULL; /* make happy gcc */
    MultiFDPages_t *pages = multifd_send_state->pages;
    uint64_t transferred;

    if (atomic_read(&multifd_send_state->exiting)) {
        return -1;
    }

    qemu_sem_wait(&multifd_send_state->channels_ready);
    for (i = next_channel;; i = (i + 1) % migrate_multifd_channels()) {
        p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        if (p->quit) {
            error_report("%s: channel %d has already quit!", __func__, i);
            qemu_mutex_unlock(&p->mutex);
            return -1;
        }
        if (!p->pending_job) {
            p->pending_job++;
            next_channel = (i + 1) % migrate_multifd_channels();
            break;
        }
        qemu_mutex_unlock(&p->mutex);
    }
    p->pages->used = 0;

    p->packet_num = multifd_send_state->packet_num++;
    p->pages->block = NULL;
    multifd_send_state->pages = p->pages;
    p->pages = pages;
    transferred = ((uint64_t) pages->used) * TARGET_PAGE_SIZE + p->packet_len;
    qemu_file_update_transfer(rs->f, transferred);
    ram_counters.multifd_bytes += transferred;
    ram_counters.transferred += transferred;
    qemu_mutex_unlock(&p->mutex);
    qemu_sem_post(&p->sem);

    return 1;
}

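/*
 * Queue one page for multifd transmission.  Pages are batched in the
 * shared pages array until it is full or a page from a different
 * RAMBlock arrives; then the whole batch is flushed with
 * multifd_send_pages() and, if needed, the page is queued again.
 * Returns 1 on success, -1 on error.
 */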
static int multifd_queue_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
{
    MultiFDPages_t *pages = multifd_send_state->pages;

    if (!pages->block) {
        pages->block = block;
    }

    if (pages->block == block) {
        pages->offset[pages->used] = offset;
        pages->iov[pages->used].iov_base = block->host + offset;
        pages->iov[pages->used].iov_len = TARGET_PAGE_SIZE;
        pages->used++;

        if (pages->used < pages->allocated) {
            return 1;
        }
    }

    if (multifd_send_pages(rs) < 0) {
        return -1;
    }

    if (pages->block != block) {
        return multifd_queue_page(rs, block, offset);
    }

    return 1;
}

static void multifd_send_terminate_threads(Error *err)
{
    int i;

    trace_multifd_send_terminate_threads(err != NULL);

    if (err) {
        MigrationState *s = migrate_get_current();
        migrate_set_error(s, err);
        if (s->state == MIGRATION_STATUS_SETUP ||
            s->state == MIGRATION_STATUS_PRE_SWITCHOVER ||
            s->state == MIGRATION_STATUS_DEVICE ||
            s->state == MIGRATION_STATUS_ACTIVE) {
            migrate_set_state(&s->state, s->state,
                              MIGRATION_STATUS_FAILED);
        }
    }

    /*
     * We don't want to exit each thread twice.  Depending on where
     * we get the error, or if there are two independent errors in two
     * threads at the same time, we can end up calling this function
     * twice.
     */
    if (atomic_xchg(&multifd_send_state->exiting, 1)) {
        return;
    }

    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        qemu_mutex_lock(&p->mutex);
        p->quit = true;
        qemu_sem_post(&p->sem);
        qemu_mutex_unlock(&p->mutex);
    }
}

void multifd_save_cleanup(void)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    multifd_send_terminate_threads(NULL);
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        if (p->running) {
            qemu_thread_join(&p->thread);
        }
        socket_send_channel_destroy(p->c);
        p->c = NULL;
        qemu_mutex_destroy(&p->mutex);
        qemu_sem_destroy(&p->sem);
        qemu_sem_destroy(&p->sem_sync);
        g_free(p->name);
        p->name = NULL;
        multifd_pages_clear(p->pages);
        p->pages = NULL;
        p->packet_len = 0;
        g_free(p->packet);
        p->packet = NULL;
    }
    qemu_sem_destroy(&multifd_send_state->channels_ready);
    g_free(multifd_send_state->params);
    multifd_send_state->params = NULL;
    multifd_pages_clear(multifd_send_state->pages);
    multifd_send_state->pages = NULL;
    g_free(multifd_send_state);
    multifd_send_state = NULL;
}

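/*
 * Flush any pending pages and make every channel send a packet with
 * MULTIFD_FLAG_SYNC set, then wait on each channel's sem_sync until
 * that packet has been written.  This tells the migration thread
 * that everything queued so far is on the wire.
 */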
static void multifd_send_sync_main(RAMState *rs)
{
    int i;

    if (!migrate_use_multifd()) {
        return;
    }
    if (multifd_send_state->pages->used) {
        if (multifd_send_pages(rs) < 0) {
            error_report("%s: multifd_send_pages fail", __func__);
            return;
        }
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_signal(p->id);

        qemu_mutex_lock(&p->mutex);

        if (p->quit) {
            error_report("%s: channel %d has already quit", __func__, i);
            qemu_mutex_unlock(&p->mutex);
            return;
        }

        p->packet_num = multifd_send_state->packet_num++;
        p->flags |= MULTIFD_FLAG_SYNC;
        p->pending_job++;
        qemu_file_update_transfer(rs->f, p->packet_len);
        ram_counters.multifd_bytes += p->packet_len;
        ram_counters.transferred += p->packet_len;
        qemu_mutex_unlock(&p->mutex);
        qemu_sem_post(&p->sem);
    }
    for (i = 0; i < migrate_multifd_channels(); i++) {
        MultiFDSendParams *p = &multifd_send_state->params[i];

        trace_multifd_send_sync_main_wait(p->id);
        qemu_sem_wait(&p->sem_sync);
    }
    trace_multifd_send_sync_main(multifd_send_state->packet_num);
}

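/*
 * One thread per send channel: send the initial handshake packet,
 * then loop waiting on p->sem for work.  For each pending job it
 * fills and writes the packet header plus the page data, posts
 * sem_sync when a SYNC packet was requested, and signals
 * channels_ready so the migration thread can pick this channel
 * again.
 */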
static void *multifd_send_thread(void *opaque)
{
    MultiFDSendParams *p = opaque;
    Error *local_err = NULL;
    int ret = 0;
    uint32_t flags = 0;

    trace_multifd_send_thread_start(p->id);
    rcu_register_thread();

    if (multifd_send_initial_packet(p, &local_err) < 0) {
        ret = -1;
        goto out;
    }
    /* initial packet */
    p->num_packets = 1;

    while (true) {
        qemu_sem_wait(&p->sem);

        if (atomic_read(&multifd_send_state->exiting)) {
            break;
        }
        qemu_mutex_lock(&p->mutex);

        if (p->pending_job) {
            uint32_t used = p->pages->used;
            uint64_t packet_num = p->packet_num;
            flags = p->flags;

            p->next_packet_size = used * qemu_target_page_size();
            multifd_send_fill_packet(p);
            p->flags = 0;
            p->num_packets++;
            p->num_pages += used;
            qemu_mutex_unlock(&p->mutex);

            trace_multifd_send(p->id, packet_num, used, flags,
                               p->next_packet_size);

            ret = qio_channel_write_all(p->c, (void *)p->packet,
                                        p->packet_len, &local_err);
            if (ret != 0) {
                break;
            }

            if (used) {
                ret = qio_channel_writev_all(p->c, p->pages->iov,
                                             used, &local_err);
                if (ret != 0) {
                    break;
                }
            }

            qemu_mutex_lock(&p->mutex);
            p->pending_job--;
            qemu_mutex_unlock(&p->mutex);

            if (flags & MULTIFD_FLAG_SYNC) {
                qemu_sem_post(&p->sem_sync);
            }
            qemu_sem_post(&multifd_send_state->channels_ready);
        } else if (p->quit) {
            qemu_mutex_unlock(&p->mutex);
            break;
        } else {
            qemu_mutex_unlock(&p->mutex);
            /* sometimes there are spurious wakeups */
        }
    }

out:
    if (local_err) {
        trace_multifd_send_error(p->id);
        multifd_send_terminate_threads(local_err);
    }

    /*
     * An error happened and we are exiting, but we can't just leave:
     * wake up whoever is waiting on us.
     */
1203 if (ret != 0) {
Ivan Ren2f4aefd2019-08-29 10:16:36 +08001204 qemu_sem_post(&p->sem_sync);
Ivan Rena3ec6b72019-06-25 21:18:18 +08001205 qemu_sem_post(&multifd_send_state->channels_ready);
1206 }
1207
Juan Quintela66770702018-02-19 19:01:45 +01001208 qemu_mutex_lock(&p->mutex);
1209 p->running = false;
1210 qemu_mutex_unlock(&p->mutex);
1211
Lidong Chen74637e62018-08-06 21:29:29 +08001212 rcu_unregister_thread();
Juan Quintela408ea6a2018-04-06 18:28:59 +02001213 trace_multifd_send_thread_end(p->id, p->num_packets, p->num_pages);
1214
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001215 return NULL;
1216}
1217
Juan Quintela60df2d42018-03-07 07:56:15 +01001218static void multifd_new_send_channel_async(QIOTask *task, gpointer opaque)
1219{
1220 MultiFDSendParams *p = opaque;
1221 QIOChannel *sioc = QIO_CHANNEL(qio_task_get_source(task));
1222 Error *local_err = NULL;
1223
Juan Quintela7dd59d02019-08-14 04:02:17 +02001224 trace_multifd_new_send_channel_async(p->id);
Juan Quintela60df2d42018-03-07 07:56:15 +01001225 if (qio_task_propagate_error(task, &local_err)) {
Fei Li1398b2e2019-01-13 22:08:47 +08001226 migrate_set_error(migrate_get_current(), local_err);
1227 multifd_save_cleanup();
Juan Quintela60df2d42018-03-07 07:56:15 +01001228 } else {
1229 p->c = QIO_CHANNEL(sioc);
1230 qio_channel_set_delay(p->c, false);
1231 p->running = true;
1232 qemu_thread_create(&p->thread, p->name, multifd_send_thread, p,
1233 QEMU_THREAD_JOINABLE);
Juan Quintela60df2d42018-03-07 07:56:15 +01001234 }
1235}
1236
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001237int multifd_save_setup(void)
1238{
1239 int thread_count;
Juan Quintelaefd1a1d2019-02-20 12:06:03 +01001240 uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001241 uint8_t i;
1242
1243 if (!migrate_use_multifd()) {
1244 return 0;
1245 }
1246 thread_count = migrate_multifd_channels();
1247 multifd_send_state = g_malloc0(sizeof(*multifd_send_state));
1248 multifd_send_state->params = g_new0(MultiFDSendParams, thread_count);
Juan Quintela34c55a92018-04-10 23:35:15 +02001249 multifd_send_state->pages = multifd_pages_init(page_count);
Juan Quintelab9ee2f72016-01-15 11:40:13 +01001250 qemu_sem_init(&multifd_send_state->channels_ready, 0);
Juan Quintela4d65a622019-12-18 05:36:22 +01001251 atomic_set(&multifd_send_state->exiting, 0);
Juan Quintela34c55a92018-04-10 23:35:15 +02001252
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001253 for (i = 0; i < thread_count; i++) {
1254 MultiFDSendParams *p = &multifd_send_state->params[i];
1255
1256 qemu_mutex_init(&p->mutex);
1257 qemu_sem_init(&p->sem, 0);
Juan Quintela18cdcea2019-08-14 04:02:14 +02001258 qemu_sem_init(&p->sem_sync, 0);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001259 p->quit = false;
Juan Quintela0beb5ed2018-04-11 03:02:10 +02001260 p->pending_job = 0;
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001261 p->id = i;
Juan Quintela34c55a92018-04-10 23:35:15 +02001262 p->pages = multifd_pages_init(page_count);
Juan Quintela2a26c972018-04-04 11:26:58 +02001263 p->packet_len = sizeof(MultiFDPacket_t)
1264 + sizeof(ram_addr_t) * page_count;
1265 p->packet = g_malloc0(p->packet_len);
Wei Yang9985e1f2019-10-11 16:50:49 +08001266 p->packet->magic = cpu_to_be32(MULTIFD_MAGIC);
1267 p->packet->version = cpu_to_be32(MULTIFD_VERSION);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001268 p->name = g_strdup_printf("multifdsend_%d", i);
Juan Quintela60df2d42018-03-07 07:56:15 +01001269 socket_send_channel_create(multifd_new_send_channel_async, p);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001270 }
1271 return 0;
1272}
1273
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001274struct {
1275 MultiFDRecvParams *params;
1276 /* number of created threads */
1277 int count;
Juan Quintela6df264a2018-02-28 09:10:07 +01001278 /* syncs main thread and channels */
1279 QemuSemaphore sem_sync;
1280 /* global number of generated multifd packets */
1281 uint64_t packet_num;
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001282} *multifd_recv_state;
1283
Juan Quintela66770702018-02-19 19:01:45 +01001284static void multifd_recv_terminate_threads(Error *err)
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001285{
1286 int i;
1287
Juan Quintela5558c912019-08-14 04:02:13 +02001288 trace_multifd_recv_terminate_threads(err != NULL);
1289
Juan Quintela7a169d72018-02-19 19:01:15 +01001290 if (err) {
1291 MigrationState *s = migrate_get_current();
1292 migrate_set_error(s, err);
1293 if (s->state == MIGRATION_STATUS_SETUP ||
1294 s->state == MIGRATION_STATUS_ACTIVE) {
1295 migrate_set_state(&s->state, s->state,
1296 MIGRATION_STATUS_FAILED);
1297 }
1298 }
1299
Juan Quintela66770702018-02-19 19:01:45 +01001300 for (i = 0; i < migrate_multifd_channels(); i++) {
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001301 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1302
1303 qemu_mutex_lock(&p->mutex);
Juan Quintela3c3ca252019-07-24 11:46:24 +02001304 p->quit = true;
Juan Quintela7a5cc332018-04-18 00:49:19 +02001305        /* We could arrive here for two reasons:
1306            - normal quit, i.e. everything went fine, just finished
1307            - error quit: we close the channels so that the channel threads
1308              finish their qio_channel_read_all_eof() call */
1309 qio_channel_shutdown(p->c, QIO_CHANNEL_SHUTDOWN_BOTH, NULL);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001310 qemu_mutex_unlock(&p->mutex);
1311 }
1312}
1313
1314int multifd_load_cleanup(Error **errp)
1315{
1316 int i;
1317 int ret = 0;
1318
1319 if (!migrate_use_multifd()) {
1320 return 0;
1321 }
Juan Quintela66770702018-02-19 19:01:45 +01001322 multifd_recv_terminate_threads(NULL);
1323 for (i = 0; i < migrate_multifd_channels(); i++) {
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001324 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1325
Juan Quintela66770702018-02-19 19:01:45 +01001326 if (p->running) {
Juan Quintela3c3ca252019-07-24 11:46:24 +02001327 p->quit = true;
Ivan Renf193bc02019-06-25 21:18:19 +08001328 /*
1329         * multifd_recv_thread may be blocked at the MULTIFD_FLAG_SYNC handling
1330         * code; waking it up here is harmless in the cleanup phase.
1331 */
1332 qemu_sem_post(&p->sem_sync);
Juan Quintela66770702018-02-19 19:01:45 +01001333 qemu_thread_join(&p->thread);
1334 }
Juan Quintela60df2d42018-03-07 07:56:15 +01001335 object_unref(OBJECT(p->c));
1336 p->c = NULL;
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001337 qemu_mutex_destroy(&p->mutex);
Juan Quintela6df264a2018-02-28 09:10:07 +01001338 qemu_sem_destroy(&p->sem_sync);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001339 g_free(p->name);
1340 p->name = NULL;
Juan Quintela34c55a92018-04-10 23:35:15 +02001341 multifd_pages_clear(p->pages);
1342 p->pages = NULL;
Juan Quintela2a26c972018-04-04 11:26:58 +02001343 p->packet_len = 0;
1344 g_free(p->packet);
1345 p->packet = NULL;
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001346 }
Juan Quintela6df264a2018-02-28 09:10:07 +01001347 qemu_sem_destroy(&multifd_recv_state->sem_sync);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001348 g_free(multifd_recv_state->params);
1349 multifd_recv_state->params = NULL;
1350 g_free(multifd_recv_state);
1351 multifd_recv_state = NULL;
1352
1353 return ret;
1354}
1355
Juan Quintela6df264a2018-02-28 09:10:07 +01001356static void multifd_recv_sync_main(void)
1357{
1358 int i;
1359
1360 if (!migrate_use_multifd()) {
1361 return;
1362 }
1363 for (i = 0; i < migrate_multifd_channels(); i++) {
1364 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1365
Juan Quintela6df264a2018-02-28 09:10:07 +01001366 trace_multifd_recv_sync_main_wait(p->id);
1367 qemu_sem_wait(&multifd_recv_state->sem_sync);
Wei Yang77568ea2019-06-04 10:35:40 +08001368 }
1369 for (i = 0; i < migrate_multifd_channels(); i++) {
1370 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1371
Juan Quintela6df264a2018-02-28 09:10:07 +01001372 qemu_mutex_lock(&p->mutex);
1373 if (multifd_recv_state->packet_num < p->packet_num) {
1374 multifd_recv_state->packet_num = p->packet_num;
1375 }
1376 qemu_mutex_unlock(&p->mutex);
Juan Quintela6df264a2018-02-28 09:10:07 +01001377 trace_multifd_recv_sync_main_signal(p->id);
Juan Quintela6df264a2018-02-28 09:10:07 +01001378 qemu_sem_post(&p->sem_sync);
1379 }
1380 trace_multifd_recv_sync_main(multifd_recv_state->packet_num);
1381}
1382
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001383static void *multifd_recv_thread(void *opaque)
1384{
1385 MultiFDRecvParams *p = opaque;
Juan Quintela2a26c972018-04-04 11:26:58 +02001386 Error *local_err = NULL;
1387 int ret;
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001388
Juan Quintela408ea6a2018-04-06 18:28:59 +02001389 trace_multifd_recv_thread_start(p->id);
Lidong Chen74637e62018-08-06 21:29:29 +08001390 rcu_register_thread();
Juan Quintela408ea6a2018-04-06 18:28:59 +02001391
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001392 while (true) {
Juan Quintela6df264a2018-02-28 09:10:07 +01001393 uint32_t used;
1394 uint32_t flags;
1395
Juan Quintela3c3ca252019-07-24 11:46:24 +02001396 if (p->quit) {
1397 break;
1398 }
1399
Juan Quintela8b2db7f2018-04-11 12:36:13 +02001400 ret = qio_channel_read_all_eof(p->c, (void *)p->packet,
1401 p->packet_len, &local_err);
1402 if (ret == 0) { /* EOF */
1403 break;
1404 }
1405 if (ret == -1) { /* Error */
1406 break;
1407 }
Juan Quintela6df264a2018-02-28 09:10:07 +01001408
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001409 qemu_mutex_lock(&p->mutex);
Juan Quintela6df264a2018-02-28 09:10:07 +01001410 ret = multifd_recv_unfill_packet(p, &local_err);
1411 if (ret) {
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001412 qemu_mutex_unlock(&p->mutex);
1413 break;
1414 }
Juan Quintela6df264a2018-02-28 09:10:07 +01001415
1416 used = p->pages->used;
1417 flags = p->flags;
Juan Quintela2a34ee52019-01-04 19:45:39 +01001418 trace_multifd_recv(p->id, p->packet_num, used, flags,
1419 p->next_packet_size);
Juan Quintela6df264a2018-02-28 09:10:07 +01001420 p->num_packets++;
1421 p->num_pages += used;
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001422 qemu_mutex_unlock(&p->mutex);
Juan Quintela6df264a2018-02-28 09:10:07 +01001423
Juan Quintelaad24c7c2019-01-04 19:12:35 +01001424 if (used) {
1425 ret = qio_channel_readv_all(p->c, p->pages->iov,
1426 used, &local_err);
1427 if (ret != 0) {
1428 break;
1429 }
Juan Quintela8b2db7f2018-04-11 12:36:13 +02001430 }
1431
Juan Quintela6df264a2018-02-28 09:10:07 +01001432 if (flags & MULTIFD_FLAG_SYNC) {
1433 qemu_sem_post(&multifd_recv_state->sem_sync);
1434 qemu_sem_wait(&p->sem_sync);
1435 }
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001436 }
1437
Juan Quintelad82628e2018-04-11 02:44:24 +02001438 if (local_err) {
1439 multifd_recv_terminate_threads(local_err);
1440 }
Juan Quintela66770702018-02-19 19:01:45 +01001441 qemu_mutex_lock(&p->mutex);
1442 p->running = false;
1443 qemu_mutex_unlock(&p->mutex);
1444
Lidong Chen74637e62018-08-06 21:29:29 +08001445 rcu_unregister_thread();
Juan Quintela408ea6a2018-04-06 18:28:59 +02001446 trace_multifd_recv_thread_end(p->id, p->num_packets, p->num_pages);
1447
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001448 return NULL;
1449}
1450
1451int multifd_load_setup(void)
1452{
1453 int thread_count;
Juan Quintelaefd1a1d2019-02-20 12:06:03 +01001454 uint32_t page_count = MULTIFD_PACKET_SIZE / qemu_target_page_size();
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001455 uint8_t i;
1456
1457 if (!migrate_use_multifd()) {
1458 return 0;
1459 }
1460 thread_count = migrate_multifd_channels();
1461 multifd_recv_state = g_malloc0(sizeof(*multifd_recv_state));
1462 multifd_recv_state->params = g_new0(MultiFDRecvParams, thread_count);
Juan Quintela66770702018-02-19 19:01:45 +01001463 atomic_set(&multifd_recv_state->count, 0);
Juan Quintela6df264a2018-02-28 09:10:07 +01001464 qemu_sem_init(&multifd_recv_state->sem_sync, 0);
Juan Quintela34c55a92018-04-10 23:35:15 +02001465
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001466 for (i = 0; i < thread_count; i++) {
1467 MultiFDRecvParams *p = &multifd_recv_state->params[i];
1468
1469 qemu_mutex_init(&p->mutex);
Juan Quintela6df264a2018-02-28 09:10:07 +01001470 qemu_sem_init(&p->sem_sync, 0);
Juan Quintela3c3ca252019-07-24 11:46:24 +02001471 p->quit = false;
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001472 p->id = i;
Juan Quintela34c55a92018-04-10 23:35:15 +02001473 p->pages = multifd_pages_init(page_count);
Juan Quintela2a26c972018-04-04 11:26:58 +02001474 p->packet_len = sizeof(MultiFDPacket_t)
1475 + sizeof(ram_addr_t) * page_count;
1476 p->packet = g_malloc0(p->packet_len);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001477 p->name = g_strdup_printf("multifdrecv_%d", i);
Juan Quintelaf986c3d2016-01-14 16:52:55 +01001478 }
1479 return 0;
1480}
1481
Juan Quintela62c1e0c2018-02-19 18:59:02 +01001482bool multifd_recv_all_channels_created(void)
1483{
1484 int thread_count = migrate_multifd_channels();
1485
1486 if (!migrate_use_multifd()) {
1487 return true;
1488 }
1489
1490 return thread_count == atomic_read(&multifd_recv_state->count);
1491}
1492
Fei Li49ed0d22019-01-13 22:08:46 +08001493/*
1494 * Try to receive all multifd channels to get ready for the migration.
1495 * - Return true and do not set @errp when correctly receiving all channels;
1496 * - Return false and do not set @errp when correctly receiving the current one;
1497 * - Return false and set @errp when failing to receive the current channel.
1498 */
1499bool multifd_recv_new_channel(QIOChannel *ioc, Error **errp)
Juan Quintela71bb07d2018-02-19 19:01:03 +01001500{
Juan Quintela60df2d42018-03-07 07:56:15 +01001501 MultiFDRecvParams *p;
Juan Quintelaaf8b7d22018-04-06 19:32:12 +02001502 Error *local_err = NULL;
1503 int id;
Juan Quintela60df2d42018-03-07 07:56:15 +01001504
Juan Quintelaaf8b7d22018-04-06 19:32:12 +02001505 id = multifd_recv_initial_packet(ioc, &local_err);
1506 if (id < 0) {
1507 multifd_recv_terminate_threads(local_err);
Fei Li49ed0d22019-01-13 22:08:46 +08001508 error_propagate_prepend(errp, local_err,
1509 "failed to receive packet"
1510 " via multifd channel %d: ",
1511 atomic_read(&multifd_recv_state->count));
Peter Xu81e62052018-06-27 21:22:44 +08001512 return false;
Juan Quintelaaf8b7d22018-04-06 19:32:12 +02001513 }
Juan Quintela7dd59d02019-08-14 04:02:17 +02001514 trace_multifd_recv_new_channel(id);
Juan Quintelaaf8b7d22018-04-06 19:32:12 +02001515
1516 p = &multifd_recv_state->params[id];
1517 if (p->c != NULL) {
1518 error_setg(&local_err, "multifd: received id '%d' already setup'",
1519 id);
1520 multifd_recv_terminate_threads(local_err);
Fei Li49ed0d22019-01-13 22:08:46 +08001521 error_propagate(errp, local_err);
Peter Xu81e62052018-06-27 21:22:44 +08001522 return false;
Juan Quintelaaf8b7d22018-04-06 19:32:12 +02001523 }
Juan Quintela60df2d42018-03-07 07:56:15 +01001524 p->c = ioc;
1525 object_ref(OBJECT(ioc));
Juan Quintela408ea6a2018-04-06 18:28:59 +02001526 /* initial packet */
1527 p->num_packets = 1;
Juan Quintela60df2d42018-03-07 07:56:15 +01001528
1529 p->running = true;
1530 qemu_thread_create(&p->thread, p->name, multifd_recv_thread, p,
1531 QEMU_THREAD_JOINABLE);
1532 atomic_inc(&multifd_recv_state->count);
Fei Li49ed0d22019-01-13 22:08:46 +08001533 return atomic_read(&multifd_recv_state->count) ==
1534 migrate_multifd_channels();
Juan Quintela71bb07d2018-02-19 19:01:03 +01001535}
1536
Juan Quintela56e93d22015-05-07 19:33:31 +02001537/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001538 * save_page_header: write page header to wire
Juan Quintela56e93d22015-05-07 19:33:31 +02001539 *
1540 * If this is the 1st block, it also writes the block identification
1541 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001542 * Returns the number of bytes written
Juan Quintela56e93d22015-05-07 19:33:31 +02001543 *
1544 * @f: QEMUFile where to send the data
1545 * @block: block that contains the page we want to send
1546 * @offset: offset inside the block for the page
1547 * in the lower bits, it contains flags
1548 */
Juan Quintela2bf3aa82017-05-10 13:28:13 +02001549static size_t save_page_header(RAMState *rs, QEMUFile *f, RAMBlock *block,
1550 ram_addr_t offset)
Juan Quintela56e93d22015-05-07 19:33:31 +02001551{
Liang Li9f5f3802015-07-13 17:34:10 +08001552 size_t size, len;
Juan Quintela56e93d22015-05-07 19:33:31 +02001553
Juan Quintela24795692017-03-21 11:45:01 +01001554 if (block == rs->last_sent_block) {
1555 offset |= RAM_SAVE_FLAG_CONTINUE;
1556 }
Juan Quintela2bf3aa82017-05-10 13:28:13 +02001557 qemu_put_be64(f, offset);
Juan Quintela56e93d22015-05-07 19:33:31 +02001558 size = 8;
1559
1560 if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
Liang Li9f5f3802015-07-13 17:34:10 +08001561 len = strlen(block->idstr);
Juan Quintela2bf3aa82017-05-10 13:28:13 +02001562 qemu_put_byte(f, len);
1563 qemu_put_buffer(f, (uint8_t *)block->idstr, len);
Liang Li9f5f3802015-07-13 17:34:10 +08001564 size += 1 + len;
Juan Quintela24795692017-03-21 11:45:01 +01001565 rs->last_sent_block = block;
Juan Quintela56e93d22015-05-07 19:33:31 +02001566 }
1567 return size;
1568}
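
/*
 * Illustrative sketch, not part of the original file: how many bytes
 * save_page_header() above puts on the wire.  The helper name is
 * hypothetical; "new_block" corresponds to RAM_SAVE_FLAG_CONTINUE not
 * being set, i.e. the first page sent from a block.
 */
static size_t save_page_header_len_sketch(RAMBlock *block, bool new_block)
{
    size_t size = 8;                          /* be64: offset | flags     */

    if (new_block) {
        size += 1 + strlen(block->idstr);     /* length byte + block name */
    }
    return size;
}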
1569
Juan Quintela3d0684b2017-03-23 15:06:39 +01001570/**
1571 * mig_throttle_guest_down: throotle down the guest
1572 *
1573 * Reduce the amount of guest CPU execution to hopefully slow down memory
1574 * writes. If guest dirty memory rate is reduced below the rate at
1575 * which we can transfer pages to the destination then we should be
1576 * able to complete migration. Some workloads dirty memory way too
1577 * fast and will not effectively converge, even with auto-converge.
Jason J. Herne070afca2015-09-08 13:12:35 -04001578 */
1579static void mig_throttle_guest_down(void)
1580{
1581 MigrationState *s = migrate_get_current();
Daniel P. Berrange2594f562016-04-27 11:05:14 +01001582 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
1583 uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
Li Qiang4cbc9c72018-08-01 06:00:20 -07001584 int pct_max = s->parameters.max_cpu_throttle;
Jason J. Herne070afca2015-09-08 13:12:35 -04001585
1586 /* We have not started throttling yet. Let's start it. */
1587 if (!cpu_throttle_active()) {
1588 cpu_throttle_set(pct_initial);
1589 } else {
1590 /* Throttling already on, just increase the rate */
Li Qiang4cbc9c72018-08-01 06:00:20 -07001591 cpu_throttle_set(MIN(cpu_throttle_get_percentage() + pct_icrement,
1592 pct_max));
Jason J. Herne070afca2015-09-08 13:12:35 -04001593 }
1594}
1595
Juan Quintela3d0684b2017-03-23 15:06:39 +01001596/**
1597 * xbzrle_cache_zero_page: insert a zero page in the XBZRLE cache
1598 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001599 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001600 * @current_addr: address for the zero page
1601 *
1602 * Update the xbzrle cache to reflect a page that's been sent as all 0.
Juan Quintela56e93d22015-05-07 19:33:31 +02001603 * The important thing is that a stale (not-yet-0'd) page be replaced
1604 * by the new data.
1605 * As a bonus, if the page wasn't in the cache it gets added so that
Juan Quintela3d0684b2017-03-23 15:06:39 +01001606 * when a small write is made into the 0'd page it gets XBZRLE sent.
Juan Quintela56e93d22015-05-07 19:33:31 +02001607 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01001608static void xbzrle_cache_zero_page(RAMState *rs, ram_addr_t current_addr)
Juan Quintela56e93d22015-05-07 19:33:31 +02001609{
Juan Quintela6f37bb82017-03-13 19:26:29 +01001610 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001611 return;
1612 }
1613
1614 /* We don't care if this fails to allocate a new cache page
1615 * as long as it updated an old one */
Juan Quintelac00e0922017-05-09 16:22:01 +02001616 cache_insert(XBZRLE.cache, current_addr, XBZRLE.zero_target_page,
Juan Quintela93604472017-06-06 19:49:03 +02001617 ram_counters.dirty_sync_count);
Juan Quintela56e93d22015-05-07 19:33:31 +02001618}
1619
1620#define ENCODING_FLAG_XBZRLE 0x1
1621
1622/**
1623 * save_xbzrle_page: compress and send current page
1624 *
1625 * Returns: 1 means that we wrote the page
1626 * 0 means that page is identical to the one already sent
1627 * -1 means that xbzrle would be longer than normal
1628 *
Juan Quintela5a987732017-03-13 19:39:02 +01001629 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001630 * @current_data: pointer to the address of the page contents
1631 * @current_addr: addr of the page
Juan Quintela56e93d22015-05-07 19:33:31 +02001632 * @block: block that contains the page we want to send
1633 * @offset: offset inside the block for the page
1634 * @last_stage: if we are at the completion stage
Juan Quintela56e93d22015-05-07 19:33:31 +02001635 */
Juan Quintela204b88b2017-03-15 09:16:57 +01001636static int save_xbzrle_page(RAMState *rs, uint8_t **current_data,
Juan Quintela56e93d22015-05-07 19:33:31 +02001637 ram_addr_t current_addr, RAMBlock *block,
Juan Quintela072c2512017-03-14 10:27:31 +01001638 ram_addr_t offset, bool last_stage)
Juan Quintela56e93d22015-05-07 19:33:31 +02001639{
1640 int encoded_len = 0, bytes_xbzrle;
1641 uint8_t *prev_cached_page;
1642
Juan Quintela93604472017-06-06 19:49:03 +02001643 if (!cache_is_cached(XBZRLE.cache, current_addr,
1644 ram_counters.dirty_sync_count)) {
1645 xbzrle_counters.cache_miss++;
Juan Quintela56e93d22015-05-07 19:33:31 +02001646 if (!last_stage) {
1647 if (cache_insert(XBZRLE.cache, current_addr, *current_data,
Juan Quintela93604472017-06-06 19:49:03 +02001648 ram_counters.dirty_sync_count) == -1) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001649 return -1;
1650 } else {
1651 /* update *current_data when the page has been
1652 inserted into cache */
1653 *current_data = get_cached_data(XBZRLE.cache, current_addr);
1654 }
1655 }
1656 return -1;
1657 }
1658
1659 prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);
1660
1661 /* save current buffer into memory */
1662 memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);
1663
1664 /* XBZRLE encoding (if there is no overflow) */
1665 encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
1666 TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
1667 TARGET_PAGE_SIZE);
Wei Yangca353802019-06-10 08:41:59 +08001668
1669 /*
1670 * Update the cache contents, so that it corresponds to the data
1671 * sent, in all cases except where we skip the page.
1672 */
1673 if (!last_stage && encoded_len != 0) {
1674 memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
1675 /*
1676 * In the case where we couldn't compress, ensure that the caller
1677 * sends the data from the cache, since the guest might have
1678 * changed the RAM since we copied it.
1679 */
1680 *current_data = prev_cached_page;
1681 }
1682
Juan Quintela56e93d22015-05-07 19:33:31 +02001683 if (encoded_len == 0) {
Juan Quintela55c44462017-01-23 22:32:05 +01001684 trace_save_xbzrle_page_skipping();
Juan Quintela56e93d22015-05-07 19:33:31 +02001685 return 0;
1686 } else if (encoded_len == -1) {
Juan Quintela55c44462017-01-23 22:32:05 +01001687 trace_save_xbzrle_page_overflow();
Juan Quintela93604472017-06-06 19:49:03 +02001688 xbzrle_counters.overflow++;
Juan Quintela56e93d22015-05-07 19:33:31 +02001689 return -1;
1690 }
1691
Juan Quintela56e93d22015-05-07 19:33:31 +02001692 /* Send XBZRLE based compressed page */
Juan Quintela2bf3aa82017-05-10 13:28:13 +02001693 bytes_xbzrle = save_page_header(rs, rs->f, block,
Juan Quintela204b88b2017-03-15 09:16:57 +01001694 offset | RAM_SAVE_FLAG_XBZRLE);
1695 qemu_put_byte(rs->f, ENCODING_FLAG_XBZRLE);
1696 qemu_put_be16(rs->f, encoded_len);
1697 qemu_put_buffer(rs->f, XBZRLE.encoded_buf, encoded_len);
Juan Quintela56e93d22015-05-07 19:33:31 +02001698 bytes_xbzrle += encoded_len + 1 + 2;
Juan Quintela93604472017-06-06 19:49:03 +02001699 xbzrle_counters.pages++;
1700 xbzrle_counters.bytes += bytes_xbzrle;
1701 ram_counters.transferred += bytes_xbzrle;
Juan Quintela56e93d22015-05-07 19:33:31 +02001702
1703 return 1;
1704}
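
/*
 * Illustrative sketch, not part of the original file: byte accounting
 * for one page sent by save_xbzrle_page() above (hypothetical helper
 * name; header_len is whatever save_page_header() returned).
 */
static size_t xbzrle_bytes_on_wire_sketch(size_t header_len, int encoded_len)
{
    return header_len
           + 1                 /* ENCODING_FLAG_XBZRLE byte     */
           + 2                 /* be16 encoded length           */
           + encoded_len;      /* XBZRLE-compressed payload     */
}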
1705
Juan Quintela3d0684b2017-03-23 15:06:39 +01001706/**
1707 * migration_bitmap_find_dirty: find the next dirty page from start
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001708 *
Wei Yanga5f7b1a2019-05-11 07:37:29 +08001709 * Returns the page offset within the memory region of the start of a dirty page
Juan Quintela3d0684b2017-03-23 15:06:39 +01001710 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01001711 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01001712 * @rb: RAMBlock where to search for dirty pages
Juan Quintelaa935e302017-03-21 15:36:51 +01001713 * @start: page where we start the search
Dr. David Alan Gilbertf3f491f2015-11-05 18:11:01 +00001714 */
Juan Quintela56e93d22015-05-07 19:33:31 +02001715static inline
Juan Quintelaa935e302017-03-21 15:36:51 +01001716unsigned long migration_bitmap_find_dirty(RAMState *rs, RAMBlock *rb,
Juan Quintelaf20e2862017-03-21 16:19:05 +01001717 unsigned long start)
Juan Quintela56e93d22015-05-07 19:33:31 +02001718{
Juan Quintela6b6712e2017-03-22 15:18:04 +01001719 unsigned long size = rb->used_length >> TARGET_PAGE_BITS;
1720 unsigned long *bitmap = rb->bmap;
Juan Quintela56e93d22015-05-07 19:33:31 +02001721 unsigned long next;
1722
Yury Kotovfbd162e2019-02-15 20:45:46 +03001723 if (ramblock_is_ignored(rb)) {
Cédric Le Goaterb895de52018-05-14 08:57:00 +02001724 return size;
1725 }
1726
Wei Wang6eeb63f2018-12-11 16:24:52 +08001727 /*
1728 * When the free page optimization is enabled, we need to check the bitmap
1729 * to send the non-free pages rather than all the pages in the bulk stage.
1730 */
1731 if (!rs->fpo_enabled && rs->ram_bulk_stage && start > 0) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01001732 next = start + 1;
Juan Quintela56e93d22015-05-07 19:33:31 +02001733 } else {
Juan Quintela6b6712e2017-03-22 15:18:04 +01001734 next = find_next_bit(bitmap, size, start);
Juan Quintela56e93d22015-05-07 19:33:31 +02001735 }
1736
Juan Quintela6b6712e2017-03-22 15:18:04 +01001737 return next;
Juan Quintela56e93d22015-05-07 19:33:31 +02001738}
1739
Juan Quintela06b10682017-03-21 15:18:05 +01001740static inline bool migration_bitmap_clear_dirty(RAMState *rs,
Juan Quintelaf20e2862017-03-21 16:19:05 +01001741 RAMBlock *rb,
1742 unsigned long page)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001743{
1744 bool ret;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001745
Wei Wang386a9072018-12-11 16:24:49 +08001746 qemu_mutex_lock(&rs->bitmap_mutex);
Peter Xu002cad62019-06-03 14:50:56 +08001747
1748 /*
1749 * Clear dirty bitmap if needed. This _must_ be called before we
1750      * send any of the pages in the chunk because we need to make sure
1751      * we can capture further page content changes when we sync the dirty
1752      * log the next time. So as long as we are going to send any of
1753      * the pages in the chunk, we clear the remote dirty bitmap for all.
1754 * Clearing it earlier won't be a problem, but too late will.
1755 */
1756 if (rb->clear_bmap && clear_bmap_test_and_clear(rb, page)) {
1757 uint8_t shift = rb->clear_bmap_shift;
1758 hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);
1759 hwaddr start = (page << TARGET_PAGE_BITS) & (-size);
1760
1761 /*
1762 * CLEAR_BITMAP_SHIFT_MIN should always guarantee this... this
1763      * can make things easier sometimes since then the start address
1764      * of the small chunk will always be aligned to 64 pages, so the
1765 * bitmap will always be aligned to unsigned long. We should
1766 * even be able to remove this restriction but I'm simply
1767 * keeping it.
1768 */
1769 assert(shift >= 6);
1770 trace_migration_bitmap_clear_dirty(rb->idstr, start, size, page);
1771 memory_region_clear_dirty_bitmap(rb->mr, start, size);
1772 }
1773
Juan Quintela6b6712e2017-03-22 15:18:04 +01001774 ret = test_and_clear_bit(page, rb->bmap);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001775
1776 if (ret) {
Juan Quintela0d8ec882017-03-13 21:21:41 +01001777 rs->migration_dirty_pages--;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001778 }
Wei Wang386a9072018-12-11 16:24:49 +08001779 qemu_mutex_unlock(&rs->bitmap_mutex);
1780
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00001781 return ret;
1782}
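
/*
 * Illustrative sketch, not part of the original file: the chunk covered
 * by one clear_bmap bit in migration_bitmap_clear_dirty() above.  The
 * helper name is hypothetical; the shift value is an assumed default
 * and 4 KiB target pages are assumed.
 */
static hwaddr clear_bmap_chunk_start_sketch(unsigned long page)
{
    uint8_t shift = 18;                                /* assumed default     */
    hwaddr size = 1ULL << (TARGET_PAGE_BITS + shift);  /* 4 KiB << 18 = 1 GiB */
    return (page << TARGET_PAGE_BITS) & (-size);       /* chunk-aligned start */
}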
1783
Peter Xu267691b2019-06-03 14:50:46 +08001784/* Called with RCU critical section */
Wei Yang7a3e9572019-08-08 11:31:55 +08001785static void ramblock_sync_dirty_bitmap(RAMState *rs, RAMBlock *rb)
Juan Quintela56e93d22015-05-07 19:33:31 +02001786{
Juan Quintela0d8ec882017-03-13 21:21:41 +01001787 rs->migration_dirty_pages +=
Wei Yang5d0980a2019-07-18 09:25:47 +08001788 cpu_physical_memory_sync_dirty_bitmap(rb, 0, rb->used_length,
Juan Quintela0d8ec882017-03-13 21:21:41 +01001789 &rs->num_dirty_pages_period);
Juan Quintela56e93d22015-05-07 19:33:31 +02001790}
1791
Juan Quintela3d0684b2017-03-23 15:06:39 +01001792/**
1793 * ram_pagesize_summary: calculate all the pagesizes of a VM
1794 *
1795 * Returns a summary bitmap of the page sizes of all RAMBlocks
1796 *
1797 * For VMs with just normal pages this is equivalent to the host page
1798 * size. If it's got some huge pages then it's the OR of all the
1799 * different page sizes.
Dr. David Alan Gilberte8ca1db2017-02-24 18:28:29 +00001800 */
1801uint64_t ram_pagesize_summary(void)
1802{
1803 RAMBlock *block;
1804 uint64_t summary = 0;
1805
Yury Kotovfbd162e2019-02-15 20:45:46 +03001806 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Dr. David Alan Gilberte8ca1db2017-02-24 18:28:29 +00001807 summary |= block->page_size;
1808 }
1809
1810 return summary;
1811}
1812
Xiao Guangrongaecbfe92019-01-11 14:37:30 +08001813uint64_t ram_get_total_transferred_pages(void)
1814{
1815 return ram_counters.normal + ram_counters.duplicate +
1816 compression_counters.pages + xbzrle_counters.pages;
1817}
1818
Xiao Guangrongb7340352018-06-04 17:55:12 +08001819static void migration_update_rates(RAMState *rs, int64_t end_time)
1820{
Xiao Guangrongbe8b02e2018-09-03 17:26:42 +08001821 uint64_t page_count = rs->target_page_count - rs->target_page_count_prev;
Xiao Guangrong76e03002018-09-06 15:01:00 +08001822 double compressed_size;
Xiao Guangrongb7340352018-06-04 17:55:12 +08001823
1824 /* calculate period counters */
1825 ram_counters.dirty_pages_rate = rs->num_dirty_pages_period * 1000
1826 / (end_time - rs->time_last_bitmap_sync);
1827
Xiao Guangrongbe8b02e2018-09-03 17:26:42 +08001828 if (!page_count) {
Xiao Guangrongb7340352018-06-04 17:55:12 +08001829 return;
1830 }
1831
1832 if (migrate_use_xbzrle()) {
1833 xbzrle_counters.cache_miss_rate = (double)(xbzrle_counters.cache_miss -
Xiao Guangrongbe8b02e2018-09-03 17:26:42 +08001834 rs->xbzrle_cache_miss_prev) / page_count;
Xiao Guangrongb7340352018-06-04 17:55:12 +08001835 rs->xbzrle_cache_miss_prev = xbzrle_counters.cache_miss;
1836 }
Xiao Guangrong76e03002018-09-06 15:01:00 +08001837
1838 if (migrate_use_compression()) {
1839 compression_counters.busy_rate = (double)(compression_counters.busy -
1840 rs->compress_thread_busy_prev) / page_count;
1841 rs->compress_thread_busy_prev = compression_counters.busy;
1842
1843 compressed_size = compression_counters.compressed_size -
1844 rs->compressed_size_prev;
1845 if (compressed_size) {
1846 double uncompressed_size = (compression_counters.pages -
1847 rs->compress_pages_prev) * TARGET_PAGE_SIZE;
1848
1849 /* Compression-Ratio = Uncompressed-size / Compressed-size */
1850 compression_counters.compression_rate =
1851 uncompressed_size / compressed_size;
1852
1853 rs->compress_pages_prev = compression_counters.pages;
1854 rs->compressed_size_prev = compression_counters.compressed_size;
1855 }
1856 }
Xiao Guangrongb7340352018-06-04 17:55:12 +08001857}
1858
Juan Quintela8d820d62017-03-13 19:35:50 +01001859static void migration_bitmap_sync(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02001860{
1861 RAMBlock *block;
Juan Quintela56e93d22015-05-07 19:33:31 +02001862 int64_t end_time;
Juan Quintelac4bdf0c2017-03-28 14:59:54 +02001863 uint64_t bytes_xfer_now;
Juan Quintela56e93d22015-05-07 19:33:31 +02001864
Juan Quintela93604472017-06-06 19:49:03 +02001865 ram_counters.dirty_sync_count++;
Juan Quintela56e93d22015-05-07 19:33:31 +02001866
Juan Quintelaf664da82017-03-13 19:44:57 +01001867 if (!rs->time_last_bitmap_sync) {
1868 rs->time_last_bitmap_sync = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
Juan Quintela56e93d22015-05-07 19:33:31 +02001869 }
1870
1871 trace_migration_bitmap_sync_start();
Paolo Bonzini9c1f8f42016-09-22 16:08:31 +02001872 memory_global_dirty_log_sync();
Juan Quintela56e93d22015-05-07 19:33:31 +02001873
Juan Quintela108cfae2017-03-13 21:38:09 +01001874 qemu_mutex_lock(&rs->bitmap_mutex);
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01001875 WITH_RCU_READ_LOCK_GUARD() {
1876 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
1877 ramblock_sync_dirty_bitmap(rs, block);
1878 }
1879 ram_counters.remaining = ram_bytes_remaining();
Juan Quintela56e93d22015-05-07 19:33:31 +02001880 }
Juan Quintela108cfae2017-03-13 21:38:09 +01001881 qemu_mutex_unlock(&rs->bitmap_mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02001882
Paolo Bonzini9458a9a2018-02-06 18:37:39 +01001883 memory_global_after_dirty_log_sync();
Juan Quintelaa66cd902017-03-28 15:02:43 +02001884 trace_migration_bitmap_sync_end(rs->num_dirty_pages_period);
Chao Fan1ffb5df2017-03-14 09:55:07 +08001885
Juan Quintela56e93d22015-05-07 19:33:31 +02001886 end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
1887
1888 /* more than 1 second = 1000 milliseconds */
Juan Quintelaf664da82017-03-13 19:44:57 +01001889 if (end_time > rs->time_last_bitmap_sync + 1000) {
Juan Quintela93604472017-06-06 19:49:03 +02001890 bytes_xfer_now = ram_counters.transferred;
Felipe Franciosid693c6f2017-05-24 17:10:01 +01001891
Peter Lieven9ac78b62017-09-26 12:33:16 +02001892 /* During block migration the auto-converge logic incorrectly detects
1893 * that ram migration makes no progress. Avoid this by disabling the
1894 * throttling logic during the bulk phase of block migration. */
1895 if (migrate_auto_converge() && !blk_mig_bulk_active()) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001896 /* The following detection logic can be refined later. For now:
1897           Check to see if the dirtied bytes are 50% more than the approximate
1898           number of bytes that just got transferred since the last time we
Jason J. Herne070afca2015-09-08 13:12:35 -04001899 were in this routine. If that happens twice, start or increase
1900 throttling */
Jason J. Herne070afca2015-09-08 13:12:35 -04001901
Felipe Franciosid693c6f2017-05-24 17:10:01 +01001902 if ((rs->num_dirty_pages_period * TARGET_PAGE_SIZE >
Juan Quintelaeac74152017-03-28 14:59:01 +02001903 (bytes_xfer_now - rs->bytes_xfer_prev) / 2) &&
Felipe Franciosib4a3c642017-05-24 17:10:03 +01001904 (++rs->dirty_rate_high_cnt >= 2)) {
Juan Quintela56e93d22015-05-07 19:33:31 +02001905 trace_migration_throttle();
Juan Quintela8d820d62017-03-13 19:35:50 +01001906 rs->dirty_rate_high_cnt = 0;
Jason J. Herne070afca2015-09-08 13:12:35 -04001907 mig_throttle_guest_down();
Felipe Franciosid693c6f2017-05-24 17:10:01 +01001908 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001909 }
Jason J. Herne070afca2015-09-08 13:12:35 -04001910
Xiao Guangrongb7340352018-06-04 17:55:12 +08001911 migration_update_rates(rs, end_time);
1912
Xiao Guangrongbe8b02e2018-09-03 17:26:42 +08001913 rs->target_page_count_prev = rs->target_page_count;
Felipe Franciosid693c6f2017-05-24 17:10:01 +01001914
1915 /* reset period counters */
Juan Quintelaf664da82017-03-13 19:44:57 +01001916 rs->time_last_bitmap_sync = end_time;
Juan Quintelaa66cd902017-03-28 15:02:43 +02001917 rs->num_dirty_pages_period = 0;
Felipe Franciosid2a4d852017-05-24 17:10:02 +01001918 rs->bytes_xfer_prev = bytes_xfer_now;
Juan Quintela56e93d22015-05-07 19:33:31 +02001919 }
Dr. David Alan Gilbert4addcd42015-12-16 11:47:36 +00001920 if (migrate_use_events()) {
Peter Xu3ab72382018-08-15 21:37:37 +08001921 qapi_event_send_migration_pass(ram_counters.dirty_sync_count);
Dr. David Alan Gilbert4addcd42015-12-16 11:47:36 +00001922 }
Juan Quintela56e93d22015-05-07 19:33:31 +02001923}
1924
Wei Wangbd227062018-12-11 16:24:51 +08001925static void migration_bitmap_sync_precopy(RAMState *rs)
1926{
1927 Error *local_err = NULL;
1928
1929 /*
1930 * The current notifier usage is just an optimization to migration, so we
1931 * don't stop the normal migration process in the error case.
1932 */
1933 if (precopy_notify(PRECOPY_NOTIFY_BEFORE_BITMAP_SYNC, &local_err)) {
1934 error_report_err(local_err);
1935 }
1936
1937 migration_bitmap_sync(rs);
1938
1939 if (precopy_notify(PRECOPY_NOTIFY_AFTER_BITMAP_SYNC, &local_err)) {
1940 error_report_err(local_err);
1941 }
1942}
1943
Juan Quintela56e93d22015-05-07 19:33:31 +02001944/**
Xiao Guangrong6c97ec52018-08-21 16:10:22 +08001945 * save_zero_page_to_file: send the zero page to the file
1946 *
1947 * Returns the size of data written to the file, 0 means the page is not
1948 * a zero page
1949 *
1950 * @rs: current RAM state
1951 * @file: the file where the data is saved
1952 * @block: block that contains the page we want to send
1953 * @offset: offset inside the block for the page
1954 */
1955static int save_zero_page_to_file(RAMState *rs, QEMUFile *file,
1956 RAMBlock *block, ram_addr_t offset)
1957{
1958 uint8_t *p = block->host + offset;
1959 int len = 0;
1960
1961 if (is_zero_range(p, TARGET_PAGE_SIZE)) {
1962 len += save_page_header(rs, file, block, offset | RAM_SAVE_FLAG_ZERO);
1963 qemu_put_byte(file, 0);
1964 len += 1;
1965 }
1966 return len;
1967}
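
/*
 * Illustrative sketch, not part of the original file: what a zero page
 * costs on the wire per save_zero_page_to_file() above, i.e. a page
 * header flagged RAM_SAVE_FLAG_ZERO followed by a single zero byte
 * (hypothetical helper name; header_len as returned by save_page_header()).
 */
static size_t zero_page_bytes_on_wire_sketch(size_t header_len)
{
    return header_len + 1;    /* header | RAM_SAVE_FLAG_ZERO, then one 0 byte */
}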
1968
1969/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01001970 * save_zero_page: send the zero page to the stream
Juan Quintela56e93d22015-05-07 19:33:31 +02001971 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01001972 * Returns the number of pages written.
Juan Quintela56e93d22015-05-07 19:33:31 +02001973 *
Juan Quintelaf7ccd612017-03-13 20:30:21 +01001974 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02001975 * @block: block that contains the page we want to send
1976 * @offset: offset inside the block for the page
Juan Quintela56e93d22015-05-07 19:33:31 +02001977 */
Juan Quintela7faccdc2018-01-08 18:58:17 +01001978static int save_zero_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
Juan Quintela56e93d22015-05-07 19:33:31 +02001979{
Xiao Guangrong6c97ec52018-08-21 16:10:22 +08001980 int len = save_zero_page_to_file(rs, rs->f, block, offset);
Juan Quintela56e93d22015-05-07 19:33:31 +02001981
Xiao Guangrong6c97ec52018-08-21 16:10:22 +08001982 if (len) {
Juan Quintela93604472017-06-06 19:49:03 +02001983 ram_counters.duplicate++;
Xiao Guangrong6c97ec52018-08-21 16:10:22 +08001984 ram_counters.transferred += len;
1985 return 1;
Juan Quintela56e93d22015-05-07 19:33:31 +02001986 }
Xiao Guangrong6c97ec52018-08-21 16:10:22 +08001987 return -1;
Juan Quintela56e93d22015-05-07 19:33:31 +02001988}
1989
Juan Quintela57273092017-03-20 22:25:28 +01001990static void ram_release_pages(const char *rbname, uint64_t offset, int pages)
Pavel Butsykin53f09a12017-02-03 18:23:20 +03001991{
Juan Quintela57273092017-03-20 22:25:28 +01001992 if (!migrate_release_ram() || !migration_in_postcopy()) {
Pavel Butsykin53f09a12017-02-03 18:23:20 +03001993 return;
1994 }
1995
Juan Quintelaaaa20642017-03-21 11:35:24 +01001996 ram_discard_range(rbname, offset, pages << TARGET_PAGE_BITS);
Pavel Butsykin53f09a12017-02-03 18:23:20 +03001997}
1998
Xiao Guangrong059ff0f2018-03-30 15:51:23 +08001999/*
2000 * @pages: the number of pages written by the control path,
2001 * < 0 - error
2002 * > 0 - number of pages written
2003 *
2004 * Return true if the page has been saved, otherwise false is returned.
2005 */
2006static bool control_save_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
2007 int *pages)
2008{
2009 uint64_t bytes_xmit = 0;
2010 int ret;
2011
2012 *pages = -1;
2013 ret = ram_control_save_page(rs->f, block->offset, offset, TARGET_PAGE_SIZE,
2014 &bytes_xmit);
2015 if (ret == RAM_SAVE_CONTROL_NOT_SUPP) {
2016 return false;
2017 }
2018
2019 if (bytes_xmit) {
2020 ram_counters.transferred += bytes_xmit;
2021 *pages = 1;
2022 }
2023
2024 if (ret == RAM_SAVE_CONTROL_DELAYED) {
2025 return true;
2026 }
2027
2028 if (bytes_xmit > 0) {
2029 ram_counters.normal++;
2030 } else if (bytes_xmit == 0) {
2031 ram_counters.duplicate++;
2032 }
2033
2034 return true;
2035}
2036
Xiao Guangrong65dacaa2018-03-30 15:51:27 +08002037/*
2038 * directly send the page to the stream
2039 *
2040 * Returns the number of pages written.
2041 *
2042 * @rs: current RAM state
2043 * @block: block that contains the page we want to send
2044 * @offset: offset inside the block for the page
2045 * @buf: the page to be sent
2046 * @async: send the page asynchronously
2047 */
2048static int save_normal_page(RAMState *rs, RAMBlock *block, ram_addr_t offset,
2049 uint8_t *buf, bool async)
2050{
2051 ram_counters.transferred += save_page_header(rs, rs->f, block,
2052 offset | RAM_SAVE_FLAG_PAGE);
2053 if (async) {
2054 qemu_put_buffer_async(rs->f, buf, TARGET_PAGE_SIZE,
2055 migrate_release_ram() &
2056 migration_in_postcopy());
2057 } else {
2058 qemu_put_buffer(rs->f, buf, TARGET_PAGE_SIZE);
2059 }
2060 ram_counters.transferred += TARGET_PAGE_SIZE;
2061 ram_counters.normal++;
2062 return 1;
2063}
2064
Juan Quintela56e93d22015-05-07 19:33:31 +02002065/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01002066 * ram_save_page: send the given page to the stream
Juan Quintela56e93d22015-05-07 19:33:31 +02002067 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002068 * Returns the number of pages written.
Dr. David Alan Gilbert3fd3c4b2015-12-10 16:31:46 +00002069 * < 0 - error
2070 * >=0 - Number of pages written - this might legally be 0
2071 * if xbzrle noticed the page was the same.
Juan Quintela56e93d22015-05-07 19:33:31 +02002072 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01002073 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02002074 * @block: block that contains the page we want to send
2075 * @offset: offset inside the block for the page
2076 * @last_stage: if we are at the completion stage
Juan Quintela56e93d22015-05-07 19:33:31 +02002077 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01002078static int ram_save_page(RAMState *rs, PageSearchStatus *pss, bool last_stage)
Juan Quintela56e93d22015-05-07 19:33:31 +02002079{
2080 int pages = -1;
Juan Quintela56e93d22015-05-07 19:33:31 +02002081 uint8_t *p;
Juan Quintela56e93d22015-05-07 19:33:31 +02002082 bool send_async = true;
zhanghailianga08f6892016-01-15 11:37:44 +08002083 RAMBlock *block = pss->block;
Juan Quintelaa935e302017-03-21 15:36:51 +01002084 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
Xiao Guangrong059ff0f2018-03-30 15:51:23 +08002085 ram_addr_t current_addr = block->offset + offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02002086
Dr. David Alan Gilbert2f68e392015-08-13 11:51:30 +01002087 p = block->host + offset;
Dr. David Alan Gilbert1db9d8e2017-04-26 19:37:21 +01002088 trace_ram_save_page(block->idstr, (uint64_t)offset, p);
Juan Quintela56e93d22015-05-07 19:33:31 +02002089
Juan Quintela56e93d22015-05-07 19:33:31 +02002090 XBZRLE_cache_lock();
Xiao Guangrongd7400a32018-03-30 15:51:26 +08002091 if (!rs->ram_bulk_stage && !migration_in_postcopy() &&
2092 migrate_use_xbzrle()) {
Xiao Guangrong059ff0f2018-03-30 15:51:23 +08002093 pages = save_xbzrle_page(rs, &p, current_addr, block,
2094 offset, last_stage);
2095 if (!last_stage) {
2096 /* Can't send this cached data async, since the cache page
2097 * might get updated before it gets to the wire
Juan Quintela56e93d22015-05-07 19:33:31 +02002098 */
Xiao Guangrong059ff0f2018-03-30 15:51:23 +08002099 send_async = false;
Juan Quintela56e93d22015-05-07 19:33:31 +02002100 }
2101 }
2102
2103 /* XBZRLE overflow or normal page */
2104 if (pages == -1) {
Xiao Guangrong65dacaa2018-03-30 15:51:27 +08002105 pages = save_normal_page(rs, block, offset, p, send_async);
Juan Quintela56e93d22015-05-07 19:33:31 +02002106 }
2107
2108 XBZRLE_cache_unlock();
2109
2110 return pages;
2111}
2112
Juan Quintelab9ee2f72016-01-15 11:40:13 +01002113static int ram_save_multifd_page(RAMState *rs, RAMBlock *block,
2114 ram_addr_t offset)
2115{
Ivan Ren1b81c972019-07-30 13:33:35 +08002116 if (multifd_queue_page(rs, block, offset) < 0) {
Ivan Ren713f7622019-06-25 21:18:17 +08002117 return -1;
2118 }
Juan Quintelab9ee2f72016-01-15 11:40:13 +01002119 ram_counters.normal++;
2120
2121 return 1;
2122}
2123
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002124static bool do_compress_ram_page(QEMUFile *f, z_stream *stream, RAMBlock *block,
Xiao Guangrong6ef37712018-08-21 16:10:23 +08002125 ram_addr_t offset, uint8_t *source_buf)
Juan Quintela56e93d22015-05-07 19:33:31 +02002126{
Juan Quintela53518d92017-05-04 11:46:24 +02002127 RAMState *rs = ram_state;
Liang Lia7a9a882016-05-05 15:32:57 +08002128 uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002129 bool zero_page = false;
Xiao Guangrong6ef37712018-08-21 16:10:23 +08002130 int ret;
Juan Quintela56e93d22015-05-07 19:33:31 +02002131
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002132 if (save_zero_page_to_file(rs, f, block, offset)) {
2133 zero_page = true;
2134 goto exit;
2135 }
2136
Xiao Guangrong6ef37712018-08-21 16:10:23 +08002137 save_page_header(rs, f, block, offset | RAM_SAVE_FLAG_COMPRESS_PAGE);
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08002138
2139     * copy it to an internal buffer to avoid it being modified by the VM,
2140     * so that we can catch any error during compression and
2141 * so that we can catch up the error during compression and
2142 * decompression
2143 */
2144 memcpy(source_buf, p, TARGET_PAGE_SIZE);
Xiao Guangrong6ef37712018-08-21 16:10:23 +08002145 ret = qemu_put_compression_data(f, stream, source_buf, TARGET_PAGE_SIZE);
2146 if (ret < 0) {
2147 qemu_file_set_error(migrate_get_current()->to_dst_file, ret);
Liang Lib3be2892016-05-05 15:32:54 +08002148 error_report("compressed data failed!");
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002149 return false;
Liang Lib3be2892016-05-05 15:32:54 +08002150 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002151
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002152exit:
Xiao Guangrong6ef37712018-08-21 16:10:23 +08002153 ram_release_pages(block->idstr, offset & TARGET_PAGE_MASK, 1);
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002154 return zero_page;
2155}
2156
2157static void
2158update_compress_thread_counts(const CompressParam *param, int bytes_xmit)
2159{
Xiao Guangrong76e03002018-09-06 15:01:00 +08002160 ram_counters.transferred += bytes_xmit;
2161
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002162 if (param->zero_page) {
2163 ram_counters.duplicate++;
Xiao Guangrong76e03002018-09-06 15:01:00 +08002164 return;
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002165 }
Xiao Guangrong76e03002018-09-06 15:01:00 +08002166
2167 /* 8 means a header with RAM_SAVE_FLAG_CONTINUE. */
2168 compression_counters.compressed_size += bytes_xmit - 8;
2169 compression_counters.pages++;
Juan Quintela56e93d22015-05-07 19:33:31 +02002170}
2171
Xiao Guangrong32b05492018-09-06 15:01:01 +08002172static bool save_page_use_compression(RAMState *rs);
2173
Juan Quintelace25d332017-03-15 11:00:51 +01002174static void flush_compressed_data(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02002175{
2176 int idx, len, thread_count;
2177
Xiao Guangrong32b05492018-09-06 15:01:01 +08002178 if (!save_page_use_compression(rs)) {
Juan Quintela56e93d22015-05-07 19:33:31 +02002179 return;
2180 }
2181 thread_count = migrate_compress_threads();
Liang Lia7a9a882016-05-05 15:32:57 +08002182
Liang Li0d9f9a52016-05-05 15:32:59 +08002183 qemu_mutex_lock(&comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002184 for (idx = 0; idx < thread_count; idx++) {
Liang Lia7a9a882016-05-05 15:32:57 +08002185 while (!comp_param[idx].done) {
Liang Li0d9f9a52016-05-05 15:32:59 +08002186 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002187 }
Liang Lia7a9a882016-05-05 15:32:57 +08002188 }
Liang Li0d9f9a52016-05-05 15:32:59 +08002189 qemu_mutex_unlock(&comp_done_lock);
Liang Lia7a9a882016-05-05 15:32:57 +08002190
2191 for (idx = 0; idx < thread_count; idx++) {
2192 qemu_mutex_lock(&comp_param[idx].mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08002193 if (!comp_param[idx].quit) {
Juan Quintelace25d332017-03-15 11:00:51 +01002194 len = qemu_put_qemu_file(rs->f, comp_param[idx].file);
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002195 /*
2196 * it's safe to fetch zero_page without holding comp_done_lock
2197 * as there is no further request submitted to the thread,
2198             * i.e., the thread should be waiting for a request at this point.
2199 */
2200 update_compress_thread_counts(&comp_param[idx], len);
Juan Quintela56e93d22015-05-07 19:33:31 +02002201 }
Liang Lia7a9a882016-05-05 15:32:57 +08002202 qemu_mutex_unlock(&comp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02002203 }
2204}
2205
2206static inline void set_compress_params(CompressParam *param, RAMBlock *block,
2207 ram_addr_t offset)
2208{
2209 param->block = block;
2210 param->offset = offset;
2211}
2212
Juan Quintelace25d332017-03-15 11:00:51 +01002213static int compress_page_with_multi_thread(RAMState *rs, RAMBlock *block,
2214 ram_addr_t offset)
Juan Quintela56e93d22015-05-07 19:33:31 +02002215{
2216 int idx, thread_count, bytes_xmit = -1, pages = -1;
Xiao Guangrong1d588722018-08-21 16:10:20 +08002217 bool wait = migrate_compress_wait_thread();
Juan Quintela56e93d22015-05-07 19:33:31 +02002218
2219 thread_count = migrate_compress_threads();
Liang Li0d9f9a52016-05-05 15:32:59 +08002220 qemu_mutex_lock(&comp_done_lock);
Xiao Guangrong1d588722018-08-21 16:10:20 +08002221retry:
2222 for (idx = 0; idx < thread_count; idx++) {
2223 if (comp_param[idx].done) {
2224 comp_param[idx].done = false;
2225 bytes_xmit = qemu_put_qemu_file(rs->f, comp_param[idx].file);
2226 qemu_mutex_lock(&comp_param[idx].mutex);
2227 set_compress_params(&comp_param[idx], block, offset);
2228 qemu_cond_signal(&comp_param[idx].cond);
2229 qemu_mutex_unlock(&comp_param[idx].mutex);
2230 pages = 1;
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002231 update_compress_thread_counts(&comp_param[idx], bytes_xmit);
Juan Quintela56e93d22015-05-07 19:33:31 +02002232 break;
Juan Quintela56e93d22015-05-07 19:33:31 +02002233 }
2234 }
Xiao Guangrong1d588722018-08-21 16:10:20 +08002235
2236 /*
2237 * wait for the free thread if the user specifies 'compress-wait-thread',
2238     * otherwise we will post the page out in the main thread as a normal page.
2239 */
2240 if (pages < 0 && wait) {
2241 qemu_cond_wait(&comp_done_cond, &comp_done_lock);
2242 goto retry;
2243 }
Liang Li0d9f9a52016-05-05 15:32:59 +08002244 qemu_mutex_unlock(&comp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02002245
2246 return pages;
2247}
2248
2249/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01002250 * find_dirty_block: find the next dirty page and update any state
2251 * associated with the search process.
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002252 *
Wei Yanga5f7b1a2019-05-11 07:37:29 +08002253 * Returns true if a page is found
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002254 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01002255 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01002256 * @pss: data about the state of the current dirty page scan
2257 * @again: set to false if the search has scanned the whole of RAM
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002258 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01002259static bool find_dirty_block(RAMState *rs, PageSearchStatus *pss, bool *again)
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002260{
Juan Quintelaf20e2862017-03-21 16:19:05 +01002261 pss->page = migration_bitmap_find_dirty(rs, pss->block, pss->page);
Juan Quintela6f37bb82017-03-13 19:26:29 +01002262 if (pss->complete_round && pss->block == rs->last_seen_block &&
Juan Quintelaa935e302017-03-21 15:36:51 +01002263 pss->page >= rs->last_page) {
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002264 /*
2265 * We've been once around the RAM and haven't found anything.
2266 * Give up.
2267 */
2268 *again = false;
2269 return false;
2270 }
Juan Quintelaa935e302017-03-21 15:36:51 +01002271 if ((pss->page << TARGET_PAGE_BITS) >= pss->block->used_length) {
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002272 /* Didn't find anything in this RAM Block */
Juan Quintelaa935e302017-03-21 15:36:51 +01002273 pss->page = 0;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002274 pss->block = QLIST_NEXT_RCU(pss->block, next);
2275 if (!pss->block) {
Xiao Guangrong48df9d82018-09-06 15:00:59 +08002276 /*
2277 * If memory migration starts over, we will meet a dirtied page
2278             * which may still exist in the compression threads' ring, so we
2279 * should flush the compressed data to make sure the new page
2280 * is not overwritten by the old one in the destination.
2281 *
2282             * Also, if xbzrle is on, stop using the data compression at this
2283 * point. In theory, xbzrle can do better than compression.
2284 */
2285 flush_compressed_data(rs);
2286
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002287 /* Hit the end of the list */
2288 pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
2289 /* Flag that we've looped */
2290 pss->complete_round = true;
Juan Quintela6f37bb82017-03-13 19:26:29 +01002291 rs->ram_bulk_stage = false;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002292 }
2293 /* Didn't find anything this time, but try again on the new block */
2294 *again = true;
2295 return false;
2296 } else {
2297 /* Can go around again, but... */
2298 *again = true;
2299 /* We've found something so probably don't need to */
2300 return true;
2301 }
2302}
2303
Juan Quintela3d0684b2017-03-23 15:06:39 +01002304/**
2305 * unqueue_page: gets a page of the queue
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002306 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002307 * Helper for 'get_queued_page' - gets a page off the queue
2308 *
2309 * Returns the block of the page (or NULL if none available)
2310 *
Juan Quintelaec481c62017-03-20 22:12:40 +01002311 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01002312 * @offset: used to return the offset within the RAMBlock
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002313 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01002314static RAMBlock *unqueue_page(RAMState *rs, ram_addr_t *offset)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002315{
2316 RAMBlock *block = NULL;
2317
Xiao Guangrongae526e32018-08-21 16:10:25 +08002318 if (QSIMPLEQ_EMPTY_ATOMIC(&rs->src_page_requests)) {
2319 return NULL;
2320 }
2321
Juan Quintelaec481c62017-03-20 22:12:40 +01002322 qemu_mutex_lock(&rs->src_page_req_mutex);
2323 if (!QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
2324 struct RAMSrcPageRequest *entry =
2325 QSIMPLEQ_FIRST(&rs->src_page_requests);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002326 block = entry->rb;
2327 *offset = entry->offset;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002328
2329 if (entry->len > TARGET_PAGE_SIZE) {
2330 entry->len -= TARGET_PAGE_SIZE;
2331 entry->offset += TARGET_PAGE_SIZE;
2332 } else {
2333 memory_region_unref(block->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01002334 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002335 g_free(entry);
Dr. David Alan Gilberte03a34f2018-06-13 11:26:42 +01002336 migration_consume_urgent_request();
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002337 }
2338 }
Juan Quintelaec481c62017-03-20 22:12:40 +01002339 qemu_mutex_unlock(&rs->src_page_req_mutex);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002340
2341 return block;
2342}
2343
Juan Quintela3d0684b2017-03-23 15:06:39 +01002344/**
Li Qiangff1543a2019-05-24 23:28:32 -07002345 * get_queued_page: unqueue a page from the postcopy requests
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002346 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002347 * Skips pages that are already sent (!dirty)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002348 *
Wei Yanga5f7b1a2019-05-11 07:37:29 +08002349 * Returns true if a queued page is found
Juan Quintela3d0684b2017-03-23 15:06:39 +01002350 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01002351 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01002352 * @pss: data about the state of the current dirty page scan
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002353 */
Juan Quintelaf20e2862017-03-21 16:19:05 +01002354static bool get_queued_page(RAMState *rs, PageSearchStatus *pss)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002355{
2356 RAMBlock *block;
2357 ram_addr_t offset;
2358 bool dirty;
2359
2360 do {
Juan Quintelaf20e2862017-03-21 16:19:05 +01002361 block = unqueue_page(rs, &offset);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002362 /*
2363 * We're sending this page, and since it's postcopy nothing else
2364 * will dirty it, and we must make sure it doesn't get sent again
2365 * even if this queue request was received after the background
2366 * search already sent it.
2367 */
2368 if (block) {
Juan Quintelaf20e2862017-03-21 16:19:05 +01002369 unsigned long page;
2370
Juan Quintela6b6712e2017-03-22 15:18:04 +01002371 page = offset >> TARGET_PAGE_BITS;
2372 dirty = test_bit(page, block->bmap);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002373 if (!dirty) {
Juan Quintela06b10682017-03-21 15:18:05 +01002374 trace_get_queued_page_not_dirty(block->idstr, (uint64_t)offset,
Wei Yang64737602019-08-19 14:18:43 +08002375 page);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002376 } else {
Juan Quintelaf20e2862017-03-21 16:19:05 +01002377 trace_get_queued_page(block->idstr, (uint64_t)offset, page);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002378 }
2379 }
2380
2381 } while (block && !dirty);
2382
2383 if (block) {
2384 /*
2385 * As soon as we start servicing pages out of order, then we have
2386 * to kill the bulk stage, since the bulk stage assumes
2387 * in (migration_bitmap_find_and_reset_dirty) that every page is
2388 * dirty, that's no longer true.
2389 */
Juan Quintela6f37bb82017-03-13 19:26:29 +01002390 rs->ram_bulk_stage = false;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002391
2392 /*
2393 * We want the background search to continue from the queued page
2394 * since the guest is likely to want other pages near to the page
2395 * it just requested.
2396 */
2397 pss->block = block;
Juan Quintelaa935e302017-03-21 15:36:51 +01002398 pss->page = offset >> TARGET_PAGE_BITS;
Wei Yang422314e2019-06-05 09:08:28 +08002399
2400 /*
2401         * This unqueued page would break the "one round" check, even if
2402         * it is really rare.
2403 */
2404 pss->complete_round = false;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002405 }
2406
2407 return !!block;
2408}
2409
Juan Quintela56e93d22015-05-07 19:33:31 +02002410/**
Juan Quintela5e58f962017-04-03 22:06:54 +02002411 * migration_page_queue_free: drop any remaining pages in the ram
2412 * request queue
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002413 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002414 * It should be empty at the end anyway, but in error cases there may
2415 * be some left. In case there are any pages left, we drop them.
2416 *
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002417 */
Juan Quintela83c13382017-05-04 11:45:01 +02002418static void migration_page_queue_free(RAMState *rs)
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002419{
Juan Quintelaec481c62017-03-20 22:12:40 +01002420 struct RAMSrcPageRequest *mspr, *next_mspr;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002421 /* This queue generally should be empty - but in the case of a failed
2422 * migration might have some droppings in.
2423 */
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01002424 RCU_READ_LOCK_GUARD();
Juan Quintelaec481c62017-03-20 22:12:40 +01002425 QSIMPLEQ_FOREACH_SAFE(mspr, &rs->src_page_requests, next_req, next_mspr) {
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002426 memory_region_unref(mspr->rb->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01002427 QSIMPLEQ_REMOVE_HEAD(&rs->src_page_requests, next_req);
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002428 g_free(mspr);
2429 }
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002430}
2431
2432/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01002433 * ram_save_queue_pages: queue the page for transmission
2434 *
2435 * A request from postcopy destination for example.
2436 *
2437 * Returns zero on success or negative on error
2438 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002439 * @rbname: Name of the RAMBLock of the request. NULL means the
2440 * same that last one.
2441 * @start: starting address from the start of the RAMBlock
2442 * @len: length (in bytes) to send
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002443 */
Juan Quintela96506892017-03-14 18:41:03 +01002444int ram_save_queue_pages(const char *rbname, ram_addr_t start, ram_addr_t len)
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002445{
2446 RAMBlock *ramblock;
Juan Quintela53518d92017-05-04 11:46:24 +02002447 RAMState *rs = ram_state;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002448
Juan Quintela93604472017-06-06 19:49:03 +02002449 ram_counters.postcopy_requests++;
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01002450 RCU_READ_LOCK_GUARD();
2451
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002452 if (!rbname) {
2453 /* Reuse last RAMBlock */
Juan Quintela68a098f2017-03-14 13:48:42 +01002454 ramblock = rs->last_req_rb;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002455
2456 if (!ramblock) {
2457 /*
2458 * Shouldn't happen, we can't reuse the last RAMBlock if
2459 * it's the 1st request.
2460 */
2461 error_report("ram_save_queue_pages no previous block");
Daniel Henrique Barboza03acb4e2020-01-06 15:23:31 -03002462 return -1;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002463 }
2464 } else {
2465 ramblock = qemu_ram_block_by_name(rbname);
2466
2467 if (!ramblock) {
2468 /* We shouldn't be asked for a non-existent RAMBlock */
2469 error_report("ram_save_queue_pages no block '%s'", rbname);
Daniel Henrique Barboza03acb4e2020-01-06 15:23:31 -03002470 return -1;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002471 }
Juan Quintela68a098f2017-03-14 13:48:42 +01002472 rs->last_req_rb = ramblock;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002473 }
2474 trace_ram_save_queue_pages(ramblock->idstr, start, len);
2475 if (start+len > ramblock->used_length) {
Juan Quintela9458ad62015-11-10 17:42:05 +01002476 error_report("%s request overrun start=" RAM_ADDR_FMT " len="
2477 RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002478 __func__, start, len, ramblock->used_length);
Daniel Henrique Barboza03acb4e2020-01-06 15:23:31 -03002479 return -1;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002480 }
2481
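    /*
     * Queue the request; the RAMBlock's memory region is referenced so
     * it cannot go away while the request is still pending (it is
     * unreferenced again in migration_page_queue_free()).
     */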
Juan Quintelaec481c62017-03-20 22:12:40 +01002482 struct RAMSrcPageRequest *new_entry =
2483 g_malloc0(sizeof(struct RAMSrcPageRequest));
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002484 new_entry->rb = ramblock;
2485 new_entry->offset = start;
2486 new_entry->len = len;
2487
2488 memory_region_ref(ramblock->mr);
Juan Quintelaec481c62017-03-20 22:12:40 +01002489 qemu_mutex_lock(&rs->src_page_req_mutex);
2490 QSIMPLEQ_INSERT_TAIL(&rs->src_page_requests, new_entry, next_req);
Dr. David Alan Gilberte03a34f2018-06-13 11:26:42 +01002491 migration_make_urgent_request();
Juan Quintelaec481c62017-03-20 22:12:40 +01002492 qemu_mutex_unlock(&rs->src_page_req_mutex);
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002493
2494 return 0;
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002495}
2496
Xiao Guangrongd7400a32018-03-30 15:51:26 +08002497static bool save_page_use_compression(RAMState *rs)
2498{
2499 if (!migrate_use_compression()) {
2500 return false;
2501 }
2502
2503 /*
2504 * If xbzrle is on, stop using the data compression after first
2505 * round of migration even if compression is enabled. In theory,
2506 * xbzrle can do better than compression.
2507 */
2508 if (rs->ram_bulk_stage || !migrate_use_xbzrle()) {
2509 return true;
2510 }
2511
2512 return false;
2513}
2514
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002515/*
2516 * try to compress the page before posting it out, return true if the page
2517 * has been properly handled by compression, otherwise needs other
2518 * paths to handle it
2519 */
2520static bool save_compress_page(RAMState *rs, RAMBlock *block, ram_addr_t offset)
2521{
2522 if (!save_page_use_compression(rs)) {
2523 return false;
2524 }
2525
2526 /*
2527 * When starting the process of a new block, the first page of
2528 * the block should be sent out before other pages in the same
2529 * block, and all the pages in the last block should have been sent
2530 * out.  Keeping this order is important, because the 'cont' flag
2531 * is used to avoid resending the block name.
2532 *
2533 * We post the first page as a normal page because compressing it
2534 * would take a lot of CPU time.
2535 */
2536 if (block != rs->last_sent_block) {
2537 flush_compressed_data(rs);
2538 return false;
2539 }
2540
2541 if (compress_page_with_multi_thread(rs, block, offset) > 0) {
2542 return true;
2543 }
2544
Xiao Guangrong76e03002018-09-06 15:01:00 +08002545 compression_counters.busy++;
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002546 return false;
2547}
2548
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002549/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01002550 * ram_save_target_page: save one target page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002551 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002552 * Returns the number of pages written
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002553 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01002554 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01002555 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002556 * @last_stage: if we are at the completion stage
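 *
 * The send paths below are tried in order: control_save_page() (e.g.
 * RDMA), save_compress_page(), save_zero_page(), multifd and finally
 * the plain ram_save_page() path.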
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002557 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01002558static int ram_save_target_page(RAMState *rs, PageSearchStatus *pss,
Juan Quintelaf20e2862017-03-21 16:19:05 +01002559 bool last_stage)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002560{
Xiao Guangronga8ec91f2018-03-30 15:51:25 +08002561 RAMBlock *block = pss->block;
2562 ram_addr_t offset = pss->page << TARGET_PAGE_BITS;
2563 int res;
2564
2565 if (control_save_page(rs, block, offset, &res)) {
2566 return res;
2567 }
2568
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002569 if (save_compress_page(rs, block, offset)) {
2570 return 1;
Xiao Guangrongd7400a32018-03-30 15:51:26 +08002571 }
2572
2573 res = save_zero_page(rs, block, offset);
2574 if (res > 0) {
2575 /* Must let xbzrle know, otherwise a previous (now 0'd) cached
2576 * page would be stale
2577 */
2578 if (!save_page_use_compression(rs)) {
2579 XBZRLE_cache_lock();
2580 xbzrle_cache_zero_page(rs, block->offset + offset);
2581 XBZRLE_cache_unlock();
2582 }
2583 ram_release_pages(block->idstr, offset, res);
2584 return res;
2585 }
2586
Xiao Guangrongda3f56c2018-03-30 15:51:28 +08002587 /*
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002588 * Do not use multifd together with compression, as the first page in
 2589 * a new block should be posted out before sending compressed pages.
Xiao Guangrongda3f56c2018-03-30 15:51:28 +08002590 */
Xiao Guangrong5e5fdcf2018-08-21 16:10:24 +08002591 if (!save_page_use_compression(rs) && migrate_use_multifd()) {
Juan Quintelab9ee2f72016-01-15 11:40:13 +01002592 return ram_save_multifd_page(rs, block, offset);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002593 }
2594
Xiao Guangrong1faa5662018-03-30 15:51:24 +08002595 return ram_save_page(rs, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002596}
2597
2598/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01002599 * ram_save_host_page: save a whole host page
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002600 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002601 * Starting at *offset send pages up to the end of the current host
2602 * page. It's valid for the initial offset to point into the middle of
2603 * a host page, in which case the remainder of the host page is sent.
2604 * Only dirty target pages are sent. Note that the host page size may
2605 * be a huge page for this block.
Dr. David Alan Gilbert1eb3fc02017-05-17 17:58:09 +01002606 * The saving stops at the boundary of the used_length of the block
2607 * if the RAMBlock isn't a multiple of the host page size.
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002608 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002609 * Returns the number of pages written or negative on error
2610 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01002611 * @rs: current RAM state
Juan Quintela3d0684b2017-03-23 15:06:39 +01002613 * @pss: data about the page we want to send
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002614 * @last_stage: if we are at the completion stage
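 *
 * For example (illustrative numbers): with 2 MiB host huge pages and
 * 4 KiB target pages, pagesize_bits is 512, so up to 512 dirty target
 * pages may be sent in a single call.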
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002615 */
Juan Quintelaa0a8aa12017-03-20 22:29:07 +01002616static int ram_save_host_page(RAMState *rs, PageSearchStatus *pss,
Juan Quintelaf20e2862017-03-21 16:19:05 +01002617 bool last_stage)
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002618{
2619 int tmppages, pages = 0;
Juan Quintelaa935e302017-03-21 15:36:51 +01002620 size_t pagesize_bits =
2621 qemu_ram_pagesize(pss->block) >> TARGET_PAGE_BITS;
Dr. David Alan Gilbert4c011c32017-02-24 18:28:39 +00002622
Yury Kotovfbd162e2019-02-15 20:45:46 +03002623 if (ramblock_is_ignored(pss->block)) {
Cédric Le Goaterb895de52018-05-14 08:57:00 +02002624 error_report("block %s should not be migrated !", pss->block->idstr);
2625 return 0;
2626 }
2627
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002628 do {
Xiao Guangrong1faa5662018-03-30 15:51:24 +08002629 /* Check whether the page is dirty and, if so, send it */
2630 if (!migration_bitmap_clear_dirty(rs, pss->block, pss->page)) {
2631 pss->page++;
2632 continue;
2633 }
2634
Juan Quintelaf20e2862017-03-21 16:19:05 +01002635 tmppages = ram_save_target_page(rs, pss, last_stage);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002636 if (tmppages < 0) {
2637 return tmppages;
2638 }
2639
2640 pages += tmppages;
Juan Quintelaa935e302017-03-21 15:36:51 +01002641 pss->page++;
Dr. David Alan Gilbert97e1e062019-12-05 10:29:18 +00002642 /* Allow rate limiting to happen in the middle of huge pages */
2643 migration_rate_limit();
Dr. David Alan Gilbert1eb3fc02017-05-17 17:58:09 +01002644 } while ((pss->page & (pagesize_bits - 1)) &&
2645 offset_in_ramblock(pss->block, pss->page << TARGET_PAGE_BITS));
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002646
2647 /* The offset we leave with is the last one we looked at */
Juan Quintelaa935e302017-03-21 15:36:51 +01002648 pss->page--;
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002649 return pages;
2650}
Dr. David Alan Gilbert6c595cd2015-11-05 18:11:08 +00002651
2652/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01002653 * ram_find_and_save_block: finds a dirty page and sends it to f
Juan Quintela56e93d22015-05-07 19:33:31 +02002654 *
2655 * Called within an RCU critical section.
2656 *
Xiao Guangronge8f37352018-09-03 17:26:44 +08002657 * Returns the number of pages written where zero means no dirty pages,
2658 * or negative on error
Juan Quintela56e93d22015-05-07 19:33:31 +02002659 *
Juan Quintela6f37bb82017-03-13 19:26:29 +01002660 * @rs: current RAM state
Juan Quintela56e93d22015-05-07 19:33:31 +02002661 * @last_stage: if we are at the completion stage
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002662 *
2663 * On systems where host-page-size > target-page-size it will send all the
2664 * pages in a host page that are dirty.
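 *
 * Pages queued by the postcopy destination (get_queued_page) are
 * serviced first; only when that queue is empty does the linear scan
 * of the dirty bitmap (find_dirty_block) continue.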
Juan Quintela56e93d22015-05-07 19:33:31 +02002665 */
2666
Juan Quintelace25d332017-03-15 11:00:51 +01002667static int ram_find_and_save_block(RAMState *rs, bool last_stage)
Juan Quintela56e93d22015-05-07 19:33:31 +02002668{
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01002669 PageSearchStatus pss;
Juan Quintela56e93d22015-05-07 19:33:31 +02002670 int pages = 0;
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002671 bool again, found;
Juan Quintela56e93d22015-05-07 19:33:31 +02002672
Ashijeet Acharya0827b9e2017-02-08 19:58:45 +05302673 /* No dirty page as there is zero RAM */
2674 if (!ram_bytes_total()) {
2675 return pages;
2676 }
2677
Juan Quintela6f37bb82017-03-13 19:26:29 +01002678 pss.block = rs->last_seen_block;
Juan Quintelaa935e302017-03-21 15:36:51 +01002679 pss.page = rs->last_page;
Dr. David Alan Gilbertb8fb8cb2015-09-23 15:27:10 +01002680 pss.complete_round = false;
2681
2682 if (!pss.block) {
2683 pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
2684 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002685
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002686 do {
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002687 again = true;
Juan Quintelaf20e2862017-03-21 16:19:05 +01002688 found = get_queued_page(rs, &pss);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002689
2690 if (!found) {
2691 /* priority queue empty, so just search for something dirty */
Juan Quintelaf20e2862017-03-21 16:19:05 +01002692 found = find_dirty_block(rs, &pss, &again);
Dr. David Alan Gilberta82d5932015-11-05 18:11:09 +00002693 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002694
2695 if (found) {
Juan Quintelaf20e2862017-03-21 16:19:05 +01002696 pages = ram_save_host_page(rs, &pss, last_stage);
Juan Quintela56e93d22015-05-07 19:33:31 +02002697 }
Dr. David Alan Gilbertb9e60922015-09-23 15:27:11 +01002698 } while (!pages && again);
Juan Quintela56e93d22015-05-07 19:33:31 +02002699
Juan Quintela6f37bb82017-03-13 19:26:29 +01002700 rs->last_seen_block = pss.block;
Juan Quintelaa935e302017-03-21 15:36:51 +01002701 rs->last_page = pss.page;
Juan Quintela56e93d22015-05-07 19:33:31 +02002702
2703 return pages;
2704}
2705
2706void acct_update_position(QEMUFile *f, size_t size, bool zero)
2707{
2708 uint64_t pages = size / TARGET_PAGE_SIZE;
Juan Quintelaf7ccd612017-03-13 20:30:21 +01002709
Juan Quintela56e93d22015-05-07 19:33:31 +02002710 if (zero) {
Juan Quintela93604472017-06-06 19:49:03 +02002711 ram_counters.duplicate += pages;
Juan Quintela56e93d22015-05-07 19:33:31 +02002712 } else {
Juan Quintela93604472017-06-06 19:49:03 +02002713 ram_counters.normal += pages;
2714 ram_counters.transferred += size;
Juan Quintela56e93d22015-05-07 19:33:31 +02002715 qemu_update_position(f, size);
2716 }
2717}
2718
Yury Kotovfbd162e2019-02-15 20:45:46 +03002719static uint64_t ram_bytes_total_common(bool count_ignored)
Juan Quintela56e93d22015-05-07 19:33:31 +02002720{
2721 RAMBlock *block;
2722 uint64_t total = 0;
2723
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01002724 RCU_READ_LOCK_GUARD();
2725
Yury Kotovfbd162e2019-02-15 20:45:46 +03002726 if (count_ignored) {
2727 RAMBLOCK_FOREACH_MIGRATABLE(block) {
2728 total += block->used_length;
2729 }
2730 } else {
2731 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
2732 total += block->used_length;
2733 }
Peter Xu99e15582017-05-12 12:17:39 +08002734 }
Juan Quintela56e93d22015-05-07 19:33:31 +02002735 return total;
2736}
2737
Yury Kotovfbd162e2019-02-15 20:45:46 +03002738uint64_t ram_bytes_total(void)
2739{
2740 return ram_bytes_total_common(false);
2741}
2742
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002743static void xbzrle_load_setup(void)
Juan Quintela56e93d22015-05-07 19:33:31 +02002744{
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002745 XBZRLE.decoded_buf = g_malloc(TARGET_PAGE_SIZE);
Juan Quintela56e93d22015-05-07 19:33:31 +02002746}
2747
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002748static void xbzrle_load_cleanup(void)
2749{
2750 g_free(XBZRLE.decoded_buf);
2751 XBZRLE.decoded_buf = NULL;
2752}
2753
Peter Xu7d7c96b2017-10-19 14:31:58 +08002754static void ram_state_cleanup(RAMState **rsp)
2755{
Dr. David Alan Gilbertb9ccaf62018-02-12 16:03:39 +00002756 if (*rsp) {
2757 migration_page_queue_free(*rsp);
2758 qemu_mutex_destroy(&(*rsp)->bitmap_mutex);
2759 qemu_mutex_destroy(&(*rsp)->src_page_req_mutex);
2760 g_free(*rsp);
2761 *rsp = NULL;
2762 }
Peter Xu7d7c96b2017-10-19 14:31:58 +08002763}
2764
Peter Xu84593a02017-10-19 14:31:59 +08002765static void xbzrle_cleanup(void)
2766{
2767 XBZRLE_cache_lock();
2768 if (XBZRLE.cache) {
2769 cache_fini(XBZRLE.cache);
2770 g_free(XBZRLE.encoded_buf);
2771 g_free(XBZRLE.current_buf);
2772 g_free(XBZRLE.zero_target_page);
2773 XBZRLE.cache = NULL;
2774 XBZRLE.encoded_buf = NULL;
2775 XBZRLE.current_buf = NULL;
2776 XBZRLE.zero_target_page = NULL;
2777 }
2778 XBZRLE_cache_unlock();
2779}
2780
Juan Quintelaf265e0e2017-06-28 11:52:27 +02002781static void ram_save_cleanup(void *opaque)
Juan Quintela56e93d22015-05-07 19:33:31 +02002782{
Juan Quintela53518d92017-05-04 11:46:24 +02002783 RAMState **rsp = opaque;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002784 RAMBlock *block;
Juan Quintelaeb859c52017-03-13 21:51:55 +01002785
Li Zhijian2ff64032015-07-02 20:18:05 +08002786 /* The caller must hold the iothread lock or be in a bh, so there is
Yi Wang46334562019-04-15 14:51:29 +08002787 * no write race against the migration bitmap
Li Zhijian2ff64032015-07-02 20:18:05 +08002788 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002789 memory_global_dirty_log_stop();
2790
Yury Kotovfbd162e2019-02-15 20:45:46 +03002791 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Peter Xu002cad62019-06-03 14:50:56 +08002792 g_free(block->clear_bmap);
2793 block->clear_bmap = NULL;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002794 g_free(block->bmap);
2795 block->bmap = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02002796 }
2797
Peter Xu84593a02017-10-19 14:31:59 +08002798 xbzrle_cleanup();
Juan Quintelaf0afa332017-06-28 11:52:28 +02002799 compress_threads_save_cleanup();
Peter Xu7d7c96b2017-10-19 14:31:58 +08002800 ram_state_cleanup(rsp);
Juan Quintela56e93d22015-05-07 19:33:31 +02002801}
2802
Juan Quintela6f37bb82017-03-13 19:26:29 +01002803static void ram_state_reset(RAMState *rs)
Juan Quintela56e93d22015-05-07 19:33:31 +02002804{
Juan Quintela6f37bb82017-03-13 19:26:29 +01002805 rs->last_seen_block = NULL;
2806 rs->last_sent_block = NULL;
Juan Quintela269ace22017-03-21 15:23:31 +01002807 rs->last_page = 0;
Juan Quintela6f37bb82017-03-13 19:26:29 +01002808 rs->last_version = ram_list.version;
2809 rs->ram_bulk_stage = true;
Wei Wang6eeb63f2018-12-11 16:24:52 +08002810 rs->fpo_enabled = false;
Juan Quintela56e93d22015-05-07 19:33:31 +02002811}
2812
2813#define MAX_WAIT 50 /* ms, half buffered_file limit */
2814
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00002815/*
2816 * 'expected' is the value you expect the bitmap mostly to be full
2817 * of; it won't bother printing lines that are all this value.
2818 * If 'todump' is null the migration bitmap is dumped.
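 *
 * Each output line covers up to 128 pages and looks like, e.g.
 * (illustrative):
 *   0x00000080 : ..11....1.......
 * where '1' is a set bit, '.' a clear bit, and lines whose bits all
 * equal 'expected' are skipped.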
2819 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002820void ram_debug_dump_bitmap(unsigned long *todump, bool expected,
2821 unsigned long pages)
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00002822{
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00002823 int64_t cur;
2824 int64_t linelen = 128;
2825 char linebuf[129];
2826
Juan Quintela6b6712e2017-03-22 15:18:04 +01002827 for (cur = 0; cur < pages; cur += linelen) {
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00002828 int64_t curb;
2829 bool found = false;
2830 /*
2831 * Last line; catch the case where the line length
2832 * is longer than remaining ram
2833 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01002834 if (cur + linelen > pages) {
2835 linelen = pages - cur;
Dr. David Alan Gilbert4f2e4252015-11-05 18:10:38 +00002836 }
2837 for (curb = 0; curb < linelen; curb++) {
2838 bool thisbit = test_bit(cur + curb, todump);
2839 linebuf[curb] = thisbit ? '1' : '.';
2840 found = found || (thisbit != expected);
2841 }
2842 if (found) {
2843 linebuf[curb] = '\0';
2844 fprintf(stderr, "0x%08" PRIx64 " : %s\n", cur, linebuf);
2845 }
2846 }
2847}
2848
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002849/* **** functions for postcopy ***** */
2850
Pavel Butsykinced1c612017-02-03 18:23:21 +03002851void ram_postcopy_migrated_memory_release(MigrationState *ms)
2852{
2853 struct RAMBlock *block;
Pavel Butsykinced1c612017-02-03 18:23:21 +03002854
Yury Kotovfbd162e2019-02-15 20:45:46 +03002855 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01002856 unsigned long *bitmap = block->bmap;
2857 unsigned long range = block->used_length >> TARGET_PAGE_BITS;
2858 unsigned long run_start = find_next_zero_bit(bitmap, range, 0);
Pavel Butsykinced1c612017-02-03 18:23:21 +03002859
2860 while (run_start < range) {
2861 unsigned long run_end = find_next_bit(bitmap, range, run_start + 1);
Juan Quintelaaaa20642017-03-21 11:35:24 +01002862 ram_discard_range(block->idstr, run_start << TARGET_PAGE_BITS,
Pavel Butsykinced1c612017-02-03 18:23:21 +03002863 (run_end - run_start) << TARGET_PAGE_BITS);
2864 run_start = find_next_zero_bit(bitmap, range, run_end + 1);
2865 }
2866 }
2867}
2868
Juan Quintela3d0684b2017-03-23 15:06:39 +01002869/**
2870 * postcopy_send_discard_bm_ram: discard a RAMBlock
2871 *
2872 * Returns zero on success
2873 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002874 * Callback from postcopy_each_ram_send_discard for each RAMBlock
Juan Quintela3d0684b2017-03-23 15:06:39 +01002875 *
2876 * @ms: current migration state
Wei Yang89dab312019-07-15 10:05:49 +08002877 * @block: RAMBlock to discard
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002878 */
Wei Yang810cf2b2019-07-24 09:07:21 +08002879static int postcopy_send_discard_bm_ram(MigrationState *ms, RAMBlock *block)
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002880{
Juan Quintela6b6712e2017-03-22 15:18:04 +01002881 unsigned long end = block->used_length >> TARGET_PAGE_BITS;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002882 unsigned long current;
Wei Yang1e7cf8c2019-08-19 14:18:42 +08002883 unsigned long *bitmap = block->bmap;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002884
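    /*
     * Walk the bitmap: each contiguous run of set (dirty) bits is sent
     * to the destination as a single (start, length) discard range.
     */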
Juan Quintela6b6712e2017-03-22 15:18:04 +01002885 for (current = 0; current < end; ) {
Wei Yang1e7cf8c2019-08-19 14:18:42 +08002886 unsigned long one = find_next_bit(bitmap, end, current);
Wei Yang33a5cb622019-06-27 10:08:21 +08002887 unsigned long zero, discard_length;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002888
Wei Yang33a5cb622019-06-27 10:08:21 +08002889 if (one >= end) {
2890 break;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002891 }
Wei Yang33a5cb622019-06-27 10:08:21 +08002892
Wei Yang1e7cf8c2019-08-19 14:18:42 +08002893 zero = find_next_zero_bit(bitmap, end, one + 1);
Wei Yang33a5cb622019-06-27 10:08:21 +08002894
2895 if (zero >= end) {
2896 discard_length = end - one;
2897 } else {
2898 discard_length = zero - one;
2899 }
Wei Yang810cf2b2019-07-24 09:07:21 +08002900 postcopy_discard_send_range(ms, one, discard_length);
Wei Yang33a5cb622019-06-27 10:08:21 +08002901 current = one + discard_length;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002902 }
2903
2904 return 0;
2905}
2906
Juan Quintela3d0684b2017-03-23 15:06:39 +01002907/**
2908 * postcopy_each_ram_send_discard: discard all RAMBlocks
2909 *
2910 * Returns 0 for success or negative for error
2911 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002912 * Utility for the outgoing postcopy code.
2913 * Calls postcopy_send_discard_bm_ram for each RAMBlock
2914 * passing it bitmap indexes and name.
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002915 * (qemu_ram_foreach_block ends up passing unscaled lengths
2916 * which would mean postcopy code would have to deal with target page)
Juan Quintela3d0684b2017-03-23 15:06:39 +01002917 *
2918 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002919 */
2920static int postcopy_each_ram_send_discard(MigrationState *ms)
2921{
2922 struct RAMBlock *block;
2923 int ret;
2924
Yury Kotovfbd162e2019-02-15 20:45:46 +03002925 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Wei Yang810cf2b2019-07-24 09:07:21 +08002926 postcopy_discard_send_init(ms, block->idstr);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002927
2928 /*
2929 * Postcopy sends chunks of bitmap over the wire, but it
2930 * just needs indexes at this point, avoids it having
2931 * target page specific code.
2932 */
Wei Yang810cf2b2019-07-24 09:07:21 +08002933 ret = postcopy_send_discard_bm_ram(ms, block);
2934 postcopy_discard_send_finish(ms);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00002935 if (ret) {
2936 return ret;
2937 }
2938 }
2939
2940 return 0;
2941}
2942
Juan Quintela3d0684b2017-03-23 15:06:39 +01002943/**
Wei Yang8324ef82019-08-19 14:18:41 +08002944 * postcopy_chunk_hostpages_pass: canonicalize bitmap in hostpages
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002945 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002946 * Helper for postcopy_chunk_hostpages; called once per RAMBlock to
 2947 * canonicalize that block's dirty bitmap.
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002949 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01002950 * Postcopy requires that all target pages in a host page are dirty or
 2951 * clean, not a mix.  This function canonicalizes the bitmap.
2952 *
2953 * @ms: current migration state
Juan Quintela3d0684b2017-03-23 15:06:39 +01002954 * @block: block that contains the page we want to canonicalize
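 *
 * Worked example (illustrative): with 2 MiB host pages and 4 KiB
 * target pages (host_ratio == 512), a dirty run starting at target
 * page 700 causes target pages 512..1023 to all be marked dirty, so
 * the whole host page is treated as dirty rather than a fragment.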
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002955 */
Wei Yang1e7cf8c2019-08-19 14:18:42 +08002956static void postcopy_chunk_hostpages_pass(MigrationState *ms, RAMBlock *block)
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002957{
Juan Quintela53518d92017-05-04 11:46:24 +02002958 RAMState *rs = ram_state;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002959 unsigned long *bitmap = block->bmap;
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00002960 unsigned int host_ratio = block->page_size / TARGET_PAGE_SIZE;
Juan Quintela6b6712e2017-03-22 15:18:04 +01002961 unsigned long pages = block->used_length >> TARGET_PAGE_BITS;
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002962 unsigned long run_start;
2963
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00002964 if (block->page_size == TARGET_PAGE_SIZE) {
2965 /* Easy case - TPS==HPS for a non-huge page RAMBlock */
2966 return;
2967 }
2968
Wei Yang1e7cf8c2019-08-19 14:18:42 +08002969 /* Find a dirty page */
2970 run_start = find_next_bit(bitmap, pages, 0);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002971
Juan Quintela6b6712e2017-03-22 15:18:04 +01002972 while (run_start < pages) {
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002973
2974 /*
2975 * If the start of this run of pages is in the middle of a host
2976 * page, then we need to fixup this host page.
2977 */
Wei Yang9dec3cc2019-08-06 08:46:48 +08002978 if (QEMU_IS_ALIGNED(run_start, host_ratio)) {
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002979 /* Find the end of this run */
Wei Yang1e7cf8c2019-08-19 14:18:42 +08002980 run_start = find_next_zero_bit(bitmap, pages, run_start + 1);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002981 /*
2982 * If the end isn't at the start of a host page, then the
2983 * run doesn't finish at the end of a host page
2984 * and we need to discard.
2985 */
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002986 }
2987
Wei Yang9dec3cc2019-08-06 08:46:48 +08002988 if (!QEMU_IS_ALIGNED(run_start, host_ratio)) {
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002989 unsigned long page;
Wei Yangdad45ab2019-08-06 08:46:47 +08002990 unsigned long fixup_start_addr = QEMU_ALIGN_DOWN(run_start,
2991 host_ratio);
2992 run_start = QEMU_ALIGN_UP(run_start, host_ratio);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002993
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002994 /* Clean up the bitmap */
2995 for (page = fixup_start_addr;
2996 page < fixup_start_addr + host_ratio; page++) {
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00002997 /*
2998 * Remark them as dirty, updating the count for any pages
2999 * that weren't previously dirty.
3000 */
Juan Quintela0d8ec882017-03-13 21:21:41 +01003001 rs->migration_dirty_pages += !test_and_set_bit(page, bitmap);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003002 }
3003 }
3004
Wei Yang1e7cf8c2019-08-19 14:18:42 +08003005 /* Find the next dirty page for the next iteration */
3006 run_start = find_next_bit(bitmap, pages, run_start);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003007 }
3008}
3009
Juan Quintela3d0684b2017-03-23 15:06:39 +01003010/**
Wei Yang89dab312019-07-15 10:05:49 +08003011 * postcopy_chunk_hostpages: discard any partially sent host page
Juan Quintela3d0684b2017-03-23 15:06:39 +01003012 *
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003013 * Utility for the outgoing postcopy code.
3014 *
3015 * Discard any partially sent host-page size chunks, mark any partially
Dr. David Alan Gilbert29c59172017-02-24 18:28:31 +00003016 * dirty host-page size chunks as all dirty. In this case the host-page
3017 * is the host-page for the particular RAMBlock, i.e. it might be a huge page
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003018 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01003019 * Returns zero on success
3020 *
3021 * @ms: current migration state
Juan Quintela6b6712e2017-03-22 15:18:04 +01003022 * @block: block we want to work with
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003023 */
Juan Quintela6b6712e2017-03-22 15:18:04 +01003024static int postcopy_chunk_hostpages(MigrationState *ms, RAMBlock *block)
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003025{
Wei Yang810cf2b2019-07-24 09:07:21 +08003026 postcopy_discard_send_init(ms, block->idstr);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003027
Juan Quintela6b6712e2017-03-22 15:18:04 +01003028 /*
Wei Yang1e7cf8c2019-08-19 14:18:42 +08003029 * Ensure that all partially dirty host pages are made fully dirty.
Juan Quintela6b6712e2017-03-22 15:18:04 +01003030 */
Wei Yang1e7cf8c2019-08-19 14:18:42 +08003031 postcopy_chunk_hostpages_pass(ms, block);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003032
Wei Yang810cf2b2019-07-24 09:07:21 +08003033 postcopy_discard_send_finish(ms);
Dr. David Alan Gilbert99e314e2015-11-05 18:11:15 +00003034 return 0;
3035}
3036
Juan Quintela3d0684b2017-03-23 15:06:39 +01003037/**
3038 * ram_postcopy_send_discard_bitmap: transmit the discard bitmap
3039 *
3040 * Returns zero on success
3041 *
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003042 * Transmit the set of pages to be discarded after precopy to the target;
3043 * these are pages that:
3044 * a) Have been previously transmitted but are now dirty again
3045 * b) Pages that have never been transmitted, this ensures that
3046 * any pages on the destination that have been mapped by background
3047 * tasks get discarded (transparent huge pages is the specific concern)
3048 * Hopefully this is pretty sparse
Juan Quintela3d0684b2017-03-23 15:06:39 +01003049 *
3050 * @ms: current migration state
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003051 */
3052int ram_postcopy_send_discard_bitmap(MigrationState *ms)
3053{
Juan Quintela53518d92017-05-04 11:46:24 +02003054 RAMState *rs = ram_state;
Juan Quintela6b6712e2017-03-22 15:18:04 +01003055 RAMBlock *block;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003056 int ret;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003057
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003058 RCU_READ_LOCK_GUARD();
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003059
3060 /* This should be our last sync, the src is now paused */
Juan Quintelaeb859c52017-03-13 21:51:55 +01003061 migration_bitmap_sync(rs);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003062
Juan Quintela6b6712e2017-03-22 15:18:04 +01003063 /* Easiest way to make sure we don't resume in the middle of a host-page */
3064 rs->last_seen_block = NULL;
3065 rs->last_sent_block = NULL;
3066 rs->last_page = 0;
3067
Yury Kotovfbd162e2019-02-15 20:45:46 +03003068 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01003069 /* Deal with TPS != HPS and huge pages */
3070 ret = postcopy_chunk_hostpages(ms, block);
3071 if (ret) {
Juan Quintela6b6712e2017-03-22 15:18:04 +01003072 return ret;
3073 }
3074
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003075#ifdef DEBUG_POSTCOPY
Wei Yang1e7cf8c2019-08-19 14:18:42 +08003076 ram_debug_dump_bitmap(block->bmap, true,
3077 block->used_length >> TARGET_PAGE_BITS);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003078#endif
Juan Quintela6b6712e2017-03-22 15:18:04 +01003079 }
3080 trace_ram_postcopy_send_discard_bitmap();
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003081
3082 ret = postcopy_each_ram_send_discard(ms);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003083
3084 return ret;
3085}
3086
Juan Quintela3d0684b2017-03-23 15:06:39 +01003087/**
3088 * ram_discard_range: discard dirtied pages at the beginning of postcopy
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003089 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01003090 * Returns zero on success
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003091 *
Juan Quintela36449152017-03-23 15:11:59 +01003092 * @rbname: name of the RAMBlock of the request. NULL means the
3093 * same as the last one.
Juan Quintela3d0684b2017-03-23 15:06:39 +01003094 * @start: starting offset (in bytes) within the RAMBlock
 3095 * @length: length (in bytes) to discard
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003096 */
Juan Quintelaaaa20642017-03-21 11:35:24 +01003097int ram_discard_range(const char *rbname, uint64_t start, size_t length)
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003098{
Juan Quintela36449152017-03-23 15:11:59 +01003099 trace_ram_discard_range(rbname, start, length);
Dr. David Alan Gilbertd3a50382017-02-24 18:28:32 +00003100
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003101 RCU_READ_LOCK_GUARD();
Juan Quintela36449152017-03-23 15:11:59 +01003102 RAMBlock *rb = qemu_ram_block_by_name(rbname);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003103
3104 if (!rb) {
Juan Quintela36449152017-03-23 15:11:59 +01003105 error_report("ram_discard_range: Failed to find block '%s'", rbname);
Daniel Henrique Barboza03acb4e2020-01-06 15:23:31 -03003106 return -1;
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003107 }
3108
Peter Xu814bb082018-07-23 20:33:02 +08003109 /*
3110 * On source VM, we don't need to update the received bitmap since
3111 * we don't even have one.
3112 */
3113 if (rb->receivedmap) {
3114 bitmap_clear(rb->receivedmap, start >> qemu_target_page_bits(),
3115 length >> qemu_target_page_bits());
3116 }
3117
Daniel Henrique Barboza03acb4e2020-01-06 15:23:31 -03003118 return ram_block_discard_range(rb, start, length);
Dr. David Alan Gilberte0b266f2015-11-05 18:11:02 +00003119}
3120
Peter Xu84593a02017-10-19 14:31:59 +08003121/*
3122 * For every allocation, we will try not to crash the VM if the
3123 * allocation fails.
3124 */
3125static int xbzrle_init(void)
3126{
3127 Error *local_err = NULL;
3128
3129 if (!migrate_use_xbzrle()) {
3130 return 0;
3131 }
3132
3133 XBZRLE_cache_lock();
3134
3135 XBZRLE.zero_target_page = g_try_malloc0(TARGET_PAGE_SIZE);
3136 if (!XBZRLE.zero_target_page) {
3137 error_report("%s: Error allocating zero page", __func__);
3138 goto err_out;
3139 }
3140
3141 XBZRLE.cache = cache_init(migrate_xbzrle_cache_size(),
3142 TARGET_PAGE_SIZE, &local_err);
3143 if (!XBZRLE.cache) {
3144 error_report_err(local_err);
3145 goto free_zero_page;
3146 }
3147
3148 XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
3149 if (!XBZRLE.encoded_buf) {
3150 error_report("%s: Error allocating encoded_buf", __func__);
3151 goto free_cache;
3152 }
3153
3154 XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
3155 if (!XBZRLE.current_buf) {
3156 error_report("%s: Error allocating current_buf", __func__);
3157 goto free_encoded_buf;
3158 }
3159
3160 /* We are all good */
3161 XBZRLE_cache_unlock();
3162 return 0;
3163
3164free_encoded_buf:
3165 g_free(XBZRLE.encoded_buf);
3166 XBZRLE.encoded_buf = NULL;
3167free_cache:
3168 cache_fini(XBZRLE.cache);
3169 XBZRLE.cache = NULL;
3170free_zero_page:
3171 g_free(XBZRLE.zero_target_page);
3172 XBZRLE.zero_target_page = NULL;
3173err_out:
3174 XBZRLE_cache_unlock();
3175 return -ENOMEM;
3176}
3177
Juan Quintela53518d92017-05-04 11:46:24 +02003178static int ram_state_init(RAMState **rsp)
Juan Quintela56e93d22015-05-07 19:33:31 +02003179{
Peter Xu7d00ee62017-10-19 14:31:57 +08003180 *rsp = g_try_new0(RAMState, 1);
3181
3182 if (!*rsp) {
3183 error_report("%s: Init ramstate fail", __func__);
3184 return -1;
3185 }
Juan Quintela53518d92017-05-04 11:46:24 +02003186
3187 qemu_mutex_init(&(*rsp)->bitmap_mutex);
3188 qemu_mutex_init(&(*rsp)->src_page_req_mutex);
3189 QSIMPLEQ_INIT(&(*rsp)->src_page_requests);
Juan Quintela56e93d22015-05-07 19:33:31 +02003190
Peter Xu7d00ee62017-10-19 14:31:57 +08003191 /*
Ivan Ren40c4d4a2019-07-14 22:51:19 +08003192 * Count the total number of pages used by ram blocks not including any
3193 * gaps due to alignment or unplugs.
Wei Yang03158512019-06-04 14:17:27 +08003194 * This must match the initial value of the dirty bitmap.
Peter Xu7d00ee62017-10-19 14:31:57 +08003195 */
Ivan Ren40c4d4a2019-07-14 22:51:19 +08003196 (*rsp)->migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;
Peter Xu7d00ee62017-10-19 14:31:57 +08003197 ram_state_reset(*rsp);
3198
3199 return 0;
3200}
3201
Peter Xud6eff5d2017-10-19 14:32:00 +08003202static void ram_list_init_bitmaps(void)
3203{
Peter Xu002cad62019-06-03 14:50:56 +08003204 MigrationState *ms = migrate_get_current();
Peter Xud6eff5d2017-10-19 14:32:00 +08003205 RAMBlock *block;
3206 unsigned long pages;
Peter Xu002cad62019-06-03 14:50:56 +08003207 uint8_t shift;
Peter Xud6eff5d2017-10-19 14:32:00 +08003208
3209 /* Skip setting bitmap if there is no RAM */
3210 if (ram_bytes_total()) {
Peter Xu002cad62019-06-03 14:50:56 +08003211 shift = ms->clear_bitmap_shift;
3212 if (shift > CLEAR_BITMAP_SHIFT_MAX) {
3213 error_report("clear_bitmap_shift (%u) too big, using "
3214 "max value (%u)", shift, CLEAR_BITMAP_SHIFT_MAX);
3215 shift = CLEAR_BITMAP_SHIFT_MAX;
3216 } else if (shift < CLEAR_BITMAP_SHIFT_MIN) {
3217 error_report("clear_bitmap_shift (%u) too small, using "
3218 "min value (%u)", shift, CLEAR_BITMAP_SHIFT_MIN);
3219 shift = CLEAR_BITMAP_SHIFT_MIN;
3220 }
3221
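        /*
         * Each clear_bmap bit covers (1 << shift) target pages, e.g.
         * with shift == 18 and 4 KiB target pages one bit stands for
         * 1 GiB of guest memory.
         */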
Yury Kotovfbd162e2019-02-15 20:45:46 +03003222 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Peter Xud6eff5d2017-10-19 14:32:00 +08003223 pages = block->max_length >> TARGET_PAGE_BITS;
Wei Yang03158512019-06-04 14:17:27 +08003224 /*
3225 * The initial dirty bitmap for migration must be set with all
3226 * ones to make sure we'll migrate every guest RAM page to the
3227 * destination.
Ivan Ren40c4d4a2019-07-14 22:51:19 +08003228 * Here we set all of RAMBlock.bmap to 1 because when a new migration
3229 * is started after a failed one, ram_list.
3230 * dirty_memory[DIRTY_MEMORY_MIGRATION] doesn't include the whole
3231 * guest memory.
Wei Yang03158512019-06-04 14:17:27 +08003232 */
Peter Xud6eff5d2017-10-19 14:32:00 +08003233 block->bmap = bitmap_new(pages);
Ivan Ren40c4d4a2019-07-14 22:51:19 +08003234 bitmap_set(block->bmap, 0, pages);
Peter Xu002cad62019-06-03 14:50:56 +08003235 block->clear_bmap_shift = shift;
3236 block->clear_bmap = bitmap_new(clear_bmap_size(pages, shift));
Peter Xud6eff5d2017-10-19 14:32:00 +08003237 }
3238 }
3239}
3240
3241static void ram_init_bitmaps(RAMState *rs)
3242{
3243 /* For memory_global_dirty_log_start below. */
3244 qemu_mutex_lock_iothread();
3245 qemu_mutex_lock_ramlist();
Peter Xud6eff5d2017-10-19 14:32:00 +08003246
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003247 WITH_RCU_READ_LOCK_GUARD() {
3248 ram_list_init_bitmaps();
3249 memory_global_dirty_log_start();
3250 migration_bitmap_sync_precopy(rs);
3251 }
Peter Xud6eff5d2017-10-19 14:32:00 +08003252 qemu_mutex_unlock_ramlist();
3253 qemu_mutex_unlock_iothread();
3254}
3255
Peter Xu7d00ee62017-10-19 14:31:57 +08003256static int ram_init_all(RAMState **rsp)
3257{
Peter Xu7d00ee62017-10-19 14:31:57 +08003258 if (ram_state_init(rsp)) {
3259 return -1;
3260 }
3261
Peter Xu84593a02017-10-19 14:31:59 +08003262 if (xbzrle_init()) {
3263 ram_state_cleanup(rsp);
3264 return -1;
Juan Quintela56e93d22015-05-07 19:33:31 +02003265 }
3266
Peter Xud6eff5d2017-10-19 14:32:00 +08003267 ram_init_bitmaps(*rsp);
zhanghailianga91246c2016-10-27 14:42:59 +08003268
3269 return 0;
3270}
3271
Peter Xu08614f32018-05-02 18:47:33 +08003272static void ram_state_resume_prepare(RAMState *rs, QEMUFile *out)
3273{
3274 RAMBlock *block;
3275 uint64_t pages = 0;
3276
3277 /*
3278 * Postcopy is not using xbzrle/compression, so no need for that.
3279 * Also, since the source is already halted, we don't need to care
3280 * about dirty page logging either.
3281 */
3282
Yury Kotovfbd162e2019-02-15 20:45:46 +03003283 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Peter Xu08614f32018-05-02 18:47:33 +08003284 pages += bitmap_count_one(block->bmap,
3285 block->used_length >> TARGET_PAGE_BITS);
3286 }
3287
3288 /* This may not be aligned with current bitmaps. Recalculate. */
3289 rs->migration_dirty_pages = pages;
3290
3291 rs->last_seen_block = NULL;
3292 rs->last_sent_block = NULL;
3293 rs->last_page = 0;
3294 rs->last_version = ram_list.version;
3295 /*
3296 * Disable the bulk stage, otherwise we'll resend the whole RAM no
3297 * matter what we have sent.
3298 */
3299 rs->ram_bulk_stage = false;
3300
3301 /* Update RAMState cache of output QEMUFile */
3302 rs->f = out;
3303
3304 trace_ram_state_resume_prepare(pages);
3305}
3306
Juan Quintela3d0684b2017-03-23 15:06:39 +01003307/*
Wei Wang6bcb05f2018-12-11 16:24:50 +08003308 * This function clears, from the migration dirty bitmap, the bits of the free
 3309 * pages reported by the caller. @addr is the host address corresponding to the
3310 * start of the continuous guest free pages, and @len is the total bytes of
3311 * those pages.
3312 */
3313void qemu_guest_free_page_hint(void *addr, size_t len)
3314{
3315 RAMBlock *block;
3316 ram_addr_t offset;
3317 size_t used_len, start, npages;
3318 MigrationState *s = migrate_get_current();
3319
3320 /* This function is currently expected to be used during live migration */
3321 if (!migration_is_setup_or_active(s->state)) {
3322 return;
3323 }
3324
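    /*
     * The hinted range may span several RAMBlocks; handle it block by
     * block, clearing the corresponding dirty bits so those free pages
     * are not sent over the migration stream.
     */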
3325 for (; len > 0; len -= used_len, addr += used_len) {
3326 block = qemu_ram_block_from_host(addr, false, &offset);
3327 if (unlikely(!block || offset >= block->used_length)) {
3328 /*
3329 * The implementation might not support RAMBlock resize during
3330 * live migration, but it could happen in theory with future
3331 * updates. So we add a check here to capture that case.
3332 */
3333 error_report_once("%s unexpected error", __func__);
3334 return;
3335 }
3336
3337 if (len <= block->used_length - offset) {
3338 used_len = len;
3339 } else {
3340 used_len = block->used_length - offset;
3341 }
3342
3343 start = offset >> TARGET_PAGE_BITS;
3344 npages = used_len >> TARGET_PAGE_BITS;
3345
3346 qemu_mutex_lock(&ram_state->bitmap_mutex);
3347 ram_state->migration_dirty_pages -=
3348 bitmap_count_one_with_offset(block->bmap, start, npages);
3349 bitmap_clear(block->bmap, start, npages);
3350 qemu_mutex_unlock(&ram_state->bitmap_mutex);
3351 }
3352}
3353
3354/*
Juan Quintela3d0684b2017-03-23 15:06:39 +01003355 * Each of ram_save_setup, ram_save_iterate and ram_save_complete has
zhanghailianga91246c2016-10-27 14:42:59 +08003356 * long-running RCU critical section. When rcu-reclaims in the code
3357 * start to become numerous it will be necessary to reduce the
3358 * granularity of these critical sections.
3359 */
3360
Juan Quintela3d0684b2017-03-23 15:06:39 +01003361/**
3362 * ram_save_setup: Setup RAM for migration
3363 *
3364 * Returns zero to indicate success and negative for error
3365 *
3366 * @f: QEMUFile where to send the data
3367 * @opaque: RAMState pointer
3368 */
zhanghailianga91246c2016-10-27 14:42:59 +08003369static int ram_save_setup(QEMUFile *f, void *opaque)
3370{
Juan Quintela53518d92017-05-04 11:46:24 +02003371 RAMState **rsp = opaque;
zhanghailianga91246c2016-10-27 14:42:59 +08003372 RAMBlock *block;
3373
Xiao Guangrongdcaf4462018-03-30 15:51:20 +08003374 if (compress_threads_save_setup()) {
3375 return -1;
3376 }
3377
zhanghailianga91246c2016-10-27 14:42:59 +08003378 /* migration has already setup the bitmap, reuse it. */
3379 if (!migration_in_colo_state()) {
Peter Xu7d00ee62017-10-19 14:31:57 +08003380 if (ram_init_all(rsp) != 0) {
Xiao Guangrongdcaf4462018-03-30 15:51:20 +08003381 compress_threads_save_cleanup();
zhanghailianga91246c2016-10-27 14:42:59 +08003382 return -1;
Juan Quintela53518d92017-05-04 11:46:24 +02003383 }
zhanghailianga91246c2016-10-27 14:42:59 +08003384 }
Juan Quintela53518d92017-05-04 11:46:24 +02003385 (*rsp)->f = f;
zhanghailianga91246c2016-10-27 14:42:59 +08003386
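    /*
     * Write the RAM section header: the total RAM size, then for every
     * migratable block its idstr, used_length and, when applicable, its
     * page size and (for ignore-shared) the block's address.
     */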
Dr. David Alan Gilbert0e6ebd42019-10-07 15:36:38 +01003387 WITH_RCU_READ_LOCK_GUARD() {
3388 qemu_put_be64(f, ram_bytes_total_common(true) | RAM_SAVE_FLAG_MEM_SIZE);
Juan Quintela56e93d22015-05-07 19:33:31 +02003389
Dr. David Alan Gilbert0e6ebd42019-10-07 15:36:38 +01003390 RAMBLOCK_FOREACH_MIGRATABLE(block) {
3391 qemu_put_byte(f, strlen(block->idstr));
3392 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
3393 qemu_put_be64(f, block->used_length);
3394 if (migrate_postcopy_ram() && block->page_size !=
3395 qemu_host_page_size) {
3396 qemu_put_be64(f, block->page_size);
3397 }
3398 if (migrate_ignore_shared()) {
3399 qemu_put_be64(f, block->mr->addr);
3400 }
Yury Kotovfbd162e2019-02-15 20:45:46 +03003401 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003402 }
3403
Juan Quintela56e93d22015-05-07 19:33:31 +02003404 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
3405 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
3406
Ivan Ren1b81c972019-07-30 13:33:35 +08003407 multifd_send_sync_main(*rsp);
Juan Quintela56e93d22015-05-07 19:33:31 +02003408 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
Juan Quintela35374cb2018-04-18 10:13:21 +02003409 qemu_fflush(f);
Juan Quintela56e93d22015-05-07 19:33:31 +02003410
3411 return 0;
3412}
3413
Juan Quintela3d0684b2017-03-23 15:06:39 +01003414/**
3415 * ram_save_iterate: iterative stage for migration
3416 *
3417 * Returns zero to indicate success and negative for error
3418 *
3419 * @f: QEMUFile where to send the data
3420 * @opaque: RAMState pointer
3421 */
Juan Quintela56e93d22015-05-07 19:33:31 +02003422static int ram_save_iterate(QEMUFile *f, void *opaque)
3423{
Juan Quintela53518d92017-05-04 11:46:24 +02003424 RAMState **temp = opaque;
3425 RAMState *rs = *temp;
Juan Quintela56e93d22015-05-07 19:33:31 +02003426 int ret;
3427 int i;
3428 int64_t t0;
Thomas Huth5c903082016-11-04 14:10:17 +01003429 int done = 0;
Juan Quintela56e93d22015-05-07 19:33:31 +02003430
Peter Lievenb2557342018-03-08 12:18:24 +01003431 if (blk_mig_bulk_active()) {
3432 /* Avoid transferring ram during bulk phase of block migration as
3433 * the bulk phase will usually take a long time and transferring
3434 * ram updates during that time is pointless. */
3435 goto out;
3436 }
3437
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003438 WITH_RCU_READ_LOCK_GUARD() {
3439 if (ram_list.version != rs->last_version) {
3440 ram_state_reset(rs);
Dr. David Alan Gilberte03a34f2018-06-13 11:26:42 +01003441 }
3442
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003443 /* Read version before ram_list.blocks */
3444 smp_rmb();
Xiao Guangronge8f37352018-09-03 17:26:44 +08003445
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003446 ram_control_before_iterate(f, RAM_CONTROL_ROUND);
Xiao Guangronge8f37352018-09-03 17:26:44 +08003447
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003448 t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
3449 i = 0;
3450 while ((ret = qemu_file_rate_limit(f)) == 0 ||
3451 !QSIMPLEQ_EMPTY(&rs->src_page_requests)) {
3452 int pages;
Jason J. Herne070afca2015-09-08 13:12:35 -04003453
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003454 if (qemu_file_get_error(f)) {
Juan Quintela56e93d22015-05-07 19:33:31 +02003455 break;
3456 }
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003457
3458 pages = ram_find_and_save_block(rs, false);
3459 /* no more pages to send */
3460 if (pages == 0) {
3461 done = 1;
3462 break;
3463 }
3464
3465 if (pages < 0) {
3466 qemu_file_set_error(f, pages);
3467 break;
3468 }
3469
3470 rs->target_page_count += pages;
3471
3472 /*
Wei Yang644acf92019-11-07 20:39:07 +08003473 * During postcopy, it is necessary to make sure one whole host
3474 * page is sent in one chunk.
3475 */
3476 if (migrate_postcopy_ram()) {
3477 flush_compressed_data(rs);
3478 }
3479
3480 /*
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003481 * we want to check in the 1st loop, just in case it was the 1st
3482 * time and we had to sync the dirty bitmap.
3483 * qemu_clock_get_ns() is a bit expensive, so we only check once
3484 * every few iterations.
3485 */
3486 if ((i & 63) == 0) {
3487 uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) /
3488 1000000;
3489 if (t1 > MAX_WAIT) {
3490 trace_ram_save_iterate_big_wait(t1, i);
3491 break;
3492 }
3493 }
3494 i++;
Juan Quintela56e93d22015-05-07 19:33:31 +02003495 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003496 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003497
3498 /*
3499 * Must occur before EOS (or any QEMUFile operation)
3500 * because of RDMA protocol.
3501 */
3502 ram_control_after_iterate(f, RAM_CONTROL_ROUND);
3503
Peter Lievenb2557342018-03-08 12:18:24 +01003504out:
Ivan Ren1b81c972019-07-30 13:33:35 +08003505 multifd_send_sync_main(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02003506 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
Juan Quintela35374cb2018-04-18 10:13:21 +02003507 qemu_fflush(f);
Juan Quintela93604472017-06-06 19:49:03 +02003508 ram_counters.transferred += 8;
Juan Quintela56e93d22015-05-07 19:33:31 +02003509
3510 ret = qemu_file_get_error(f);
3511 if (ret < 0) {
3512 return ret;
3513 }
3514
Thomas Huth5c903082016-11-04 14:10:17 +01003515 return done;
Juan Quintela56e93d22015-05-07 19:33:31 +02003516}
3517
Juan Quintela3d0684b2017-03-23 15:06:39 +01003518/**
3519 * ram_save_complete: function called to send the remaining amount of ram
3520 *
Xiao Guangronge8f37352018-09-03 17:26:44 +08003521 * Returns zero to indicate success or negative on error
Juan Quintela3d0684b2017-03-23 15:06:39 +01003522 *
3523 * Called with iothread lock
3524 *
3525 * @f: QEMUFile where to send the data
3526 * @opaque: RAMState pointer
3527 */
Juan Quintela56e93d22015-05-07 19:33:31 +02003528static int ram_save_complete(QEMUFile *f, void *opaque)
3529{
Juan Quintela53518d92017-05-04 11:46:24 +02003530 RAMState **temp = opaque;
3531 RAMState *rs = *temp;
Xiao Guangronge8f37352018-09-03 17:26:44 +08003532 int ret = 0;
Juan Quintela6f37bb82017-03-13 19:26:29 +01003533
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003534 WITH_RCU_READ_LOCK_GUARD() {
3535 if (!migration_in_postcopy()) {
3536 migration_bitmap_sync_precopy(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02003537 }
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003538
3539 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
3540
3541 /* try transferring iterative blocks of memory */
3542
3543 /* flush all remaining blocks regardless of rate limiting */
3544 while (true) {
3545 int pages;
3546
3547 pages = ram_find_and_save_block(rs, !migration_in_colo_state());
3548 /* no more blocks to sent */
3549 if (pages == 0) {
3550 break;
3551 }
3552 if (pages < 0) {
3553 ret = pages;
3554 break;
3555 }
Xiao Guangronge8f37352018-09-03 17:26:44 +08003556 }
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003557
3558 flush_compressed_data(rs);
3559 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
Juan Quintela56e93d22015-05-07 19:33:31 +02003560 }
3561
Ivan Ren1b81c972019-07-30 13:33:35 +08003562 multifd_send_sync_main(rs);
Juan Quintela56e93d22015-05-07 19:33:31 +02003563 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
Juan Quintela35374cb2018-04-18 10:13:21 +02003564 qemu_fflush(f);
Juan Quintela56e93d22015-05-07 19:33:31 +02003565
Xiao Guangronge8f37352018-09-03 17:26:44 +08003566 return ret;
Juan Quintela56e93d22015-05-07 19:33:31 +02003567}
3568
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00003569static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
Vladimir Sementsov-Ogievskiy47995022018-03-13 15:34:00 -04003570 uint64_t *res_precopy_only,
3571 uint64_t *res_compatible,
3572 uint64_t *res_postcopy_only)
Juan Quintela56e93d22015-05-07 19:33:31 +02003573{
Juan Quintela53518d92017-05-04 11:46:24 +02003574 RAMState **temp = opaque;
3575 RAMState *rs = *temp;
Juan Quintela56e93d22015-05-07 19:33:31 +02003576 uint64_t remaining_size;
3577
Juan Quintela9edabd42017-03-14 12:02:16 +01003578 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02003579
Juan Quintela57273092017-03-20 22:25:28 +01003580 if (!migration_in_postcopy() &&
Dr. David Alan Gilbert663e6c12015-11-05 18:11:13 +00003581 remaining_size < max_size) {
Juan Quintela56e93d22015-05-07 19:33:31 +02003582 qemu_mutex_lock_iothread();
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003583 WITH_RCU_READ_LOCK_GUARD() {
3584 migration_bitmap_sync_precopy(rs);
3585 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003586 qemu_mutex_unlock_iothread();
Juan Quintela9edabd42017-03-14 12:02:16 +01003587 remaining_size = rs->migration_dirty_pages * TARGET_PAGE_SIZE;
Juan Quintela56e93d22015-05-07 19:33:31 +02003588 }
Dr. David Alan Gilbertc31b0982015-11-05 18:10:54 +00003589
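    /*
     * When postcopy is possible the remaining RAM can be sent either
     * before or after the switchover, so it is reported as
     * 'compatible'; otherwise it all has to be sent during precopy.
     */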
Vladimir Sementsov-Ogievskiy86e11672017-07-10 19:30:15 +03003590 if (migrate_postcopy_ram()) {
3591 /* We can do postcopy, and all the data is postcopiable */
Vladimir Sementsov-Ogievskiy47995022018-03-13 15:34:00 -04003592 *res_compatible += remaining_size;
Vladimir Sementsov-Ogievskiy86e11672017-07-10 19:30:15 +03003593 } else {
Vladimir Sementsov-Ogievskiy47995022018-03-13 15:34:00 -04003594 *res_precopy_only += remaining_size;
Vladimir Sementsov-Ogievskiy86e11672017-07-10 19:30:15 +03003595 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003596}
3597
3598static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
3599{
3600 unsigned int xh_len;
3601 int xh_flags;
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00003602 uint8_t *loaded_data;
Juan Quintela56e93d22015-05-07 19:33:31 +02003603
Juan Quintela56e93d22015-05-07 19:33:31 +02003604 /* extract RLE header */
3605 xh_flags = qemu_get_byte(f);
3606 xh_len = qemu_get_be16(f);
3607
3608 if (xh_flags != ENCODING_FLAG_XBZRLE) {
3609 error_report("Failed to load XBZRLE page - wrong compression!");
3610 return -1;
3611 }
3612
3613 if (xh_len > TARGET_PAGE_SIZE) {
3614 error_report("Failed to load XBZRLE page - len overflow!");
3615 return -1;
3616 }
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003617 loaded_data = XBZRLE.decoded_buf;
Juan Quintela56e93d22015-05-07 19:33:31 +02003618 /* load data and decode */
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003619 /* it can change loaded_data to point to an internal buffer */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00003620 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
Juan Quintela56e93d22015-05-07 19:33:31 +02003621
3622 /* decode RLE */
Dr. David Alan Gilbert063e7602015-12-16 11:47:37 +00003623 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
Juan Quintela56e93d22015-05-07 19:33:31 +02003624 TARGET_PAGE_SIZE) == -1) {
3625 error_report("Failed to load XBZRLE page - decode error!");
3626 return -1;
3627 }
3628
3629 return 0;
3630}
3631
Juan Quintela3d0684b2017-03-23 15:06:39 +01003632/**
3633 * ram_block_from_stream: read a RAMBlock id from the migration stream
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003634 *
Juan Quintela3d0684b2017-03-23 15:06:39 +01003635 * Must be called from within a rcu critical section.
3636 *
3637 * Returns a pointer from within the RCU-protected ram_list.
3638 *
3639 * @f: QEMUFile where to read the data from
3640 * @flags: Page flags (mostly to see if it's a continuation of previous block)
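 *
 * If RAM_SAVE_FLAG_CONTINUE is set, no block name is present in the
 * stream and the previously used block is reused; otherwise a
 * length-prefixed idstr follows and the block is looked up by name.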
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00003641 */
Juan Quintela3d0684b2017-03-23 15:06:39 +01003642static inline RAMBlock *ram_block_from_stream(QEMUFile *f, int flags)
Juan Quintela56e93d22015-05-07 19:33:31 +02003643{
3644 static RAMBlock *block = NULL;
3645 char id[256];
3646 uint8_t len;
3647
3648 if (flags & RAM_SAVE_FLAG_CONTINUE) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08003649 if (!block) {
Juan Quintela56e93d22015-05-07 19:33:31 +02003650 error_report("Ack, bad migration stream!");
3651 return NULL;
3652 }
zhanghailiang4c4bad42016-01-15 11:37:41 +08003653 return block;
Juan Quintela56e93d22015-05-07 19:33:31 +02003654 }
3655
3656 len = qemu_get_byte(f);
3657 qemu_get_buffer(f, (uint8_t *)id, len);
3658 id[len] = 0;
3659
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00003660 block = qemu_ram_block_by_name(id);
zhanghailiang4c4bad42016-01-15 11:37:41 +08003661 if (!block) {
3662 error_report("Can't find block %s", id);
3663 return NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02003664 }
3665
Yury Kotovfbd162e2019-02-15 20:45:46 +03003666 if (ramblock_is_ignored(block)) {
Cédric Le Goaterb895de52018-05-14 08:57:00 +02003667 error_report("block %s should not be migrated !", id);
3668 return NULL;
3669 }
3670
zhanghailiang4c4bad42016-01-15 11:37:41 +08003671 return block;
3672}
3673
3674static inline void *host_from_ram_block_offset(RAMBlock *block,
3675 ram_addr_t offset)
3676{
3677 if (!offset_in_ramblock(block, offset)) {
3678 return NULL;
3679 }
3680
3681 return block->host + offset;
Juan Quintela56e93d22015-05-07 19:33:31 +02003682}
3683
Zhang Chen13af18f2018-09-03 12:38:48 +08003684static inline void *colo_cache_from_block_offset(RAMBlock *block,
3685 ram_addr_t offset)
3686{
3687 if (!offset_in_ramblock(block, offset)) {
3688 return NULL;
3689 }
3690 if (!block->colo_cache) {
3691 error_report("%s: colo_cache is NULL in block :%s",
3692 __func__, block->idstr);
3693 return NULL;
3694 }
Zhang Chen7d9acaf2018-09-03 12:38:49 +08003695
3696 /*
3697 * During a COLO checkpoint we need a bitmap of these migrated pages.
3698 * It helps us decide which pages in the ram cache should be flushed
3699 * into VM's RAM later.
3700 */
3701 if (!test_and_set_bit(offset >> TARGET_PAGE_BITS, block->bmap)) {
3702 ram_state->migration_dirty_pages++;
3703 }
Zhang Chen13af18f2018-09-03 12:38:48 +08003704 return block->colo_cache + offset;
3705}
3706
Juan Quintela3d0684b2017-03-23 15:06:39 +01003707/**
3708 * ram_handle_compressed: handle the zero page case
3709 *
Juan Quintela56e93d22015-05-07 19:33:31 +02003710 * If a page (or a whole RDMA chunk) has been
3711 * determined to be zero, then zap it.
Juan Quintela3d0684b2017-03-23 15:06:39 +01003712 *
3713 * @host: host address for the zero page
3714 * @ch: what the page is filled from. We only support zero
3715 * @size: size of the zero page
Juan Quintela56e93d22015-05-07 19:33:31 +02003716 */
3717void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
3718{
3719 if (ch != 0 || !is_zero_range(host, size)) {
3720 memset(host, ch, size);
3721 }
3722}
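
/*
 * Standalone sketch, for illustration only (not QEMU's is_zero_range()),
 * of the check above: a page that is already zero is left untouched, so
 * an unallocated anonymous page is not dirtied by a redundant memset of
 * zeroes.  example_zap_page() is an assumed name.
 */
static void example_zap_page(uint8_t *host, uint8_t ch, uint64_t size)
{
    uint64_t i;

    if (ch == 0) {
        for (i = 0; i < size; i++) {
            if (host[i] != 0) {
                break;
            }
        }
        if (i == size) {
            return;     /* already zero: leave the page alone */
        }
    }
    memset(host, ch, size);
}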
3723
Xiao Guangrong797ca152018-03-30 15:51:21 +08003724/* return the size after decompression, or negative value on error */
3725static int
3726qemu_uncompress_data(z_stream *stream, uint8_t *dest, size_t dest_len,
3727 const uint8_t *source, size_t source_len)
3728{
3729 int err;
3730
3731 err = inflateReset(stream);
3732 if (err != Z_OK) {
3733 return -1;
3734 }
3735
3736 stream->avail_in = source_len;
3737 stream->next_in = (uint8_t *)source;
3738 stream->avail_out = dest_len;
3739 stream->next_out = dest;
3740
3741 err = inflate(stream, Z_NO_FLUSH);
3742 if (err != Z_STREAM_END) {
3743 return -1;
3744 }
3745
3746 return stream->total_out;
3747}
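
/*
 * Sender-side counterpart, shown purely as a sketch (the real
 * compression path lives elsewhere in this file): the same
 * reset-per-page pattern, but driving deflate() with Z_FINISH so a
 * single call emits one complete, self-contained compressed page.
 * example_compress_page() is an assumed name, not a QEMU API.
 */
static int
example_compress_page(z_stream *stream, uint8_t *dest, size_t dest_len,
                      const uint8_t *source, size_t source_len)
{
    if (deflateReset(stream) != Z_OK) {
        return -1;
    }

    stream->avail_in = source_len;
    stream->next_in = (uint8_t *)source;
    stream->avail_out = dest_len;
    stream->next_out = dest;

    if (deflate(stream, Z_FINISH) != Z_STREAM_END) {
        return -1;
    }

    return stream->total_out;
}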
3748
Juan Quintela56e93d22015-05-07 19:33:31 +02003749static void *do_data_decompress(void *opaque)
3750{
3751 DecompressParam *param = opaque;
3752 unsigned long pagesize;
Liang Li33d151f2016-05-05 15:32:58 +08003753 uint8_t *des;
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003754 int len, ret;
Juan Quintela56e93d22015-05-07 19:33:31 +02003755
Liang Li33d151f2016-05-05 15:32:58 +08003756 qemu_mutex_lock(&param->mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08003757 while (!param->quit) {
Liang Li33d151f2016-05-05 15:32:58 +08003758 if (param->des) {
3759 des = param->des;
3760 len = param->len;
3761 param->des = 0;
3762 qemu_mutex_unlock(&param->mutex);
3763
Liang Li73a89122016-05-05 15:32:51 +08003764 pagesize = TARGET_PAGE_SIZE;
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003765
3766 ret = qemu_uncompress_data(&param->stream, des, pagesize,
3767 param->compbuf, len);
Xiao Guangrongf5482222018-05-03 16:06:11 +08003768 if (ret < 0 && migrate_get_current()->decompress_error_check) {
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003769 error_report("decompress data failed");
3770 qemu_file_set_error(decomp_file, ret);
3771 }
Liang Li73a89122016-05-05 15:32:51 +08003772
Liang Li33d151f2016-05-05 15:32:58 +08003773 qemu_mutex_lock(&decomp_done_lock);
3774 param->done = true;
3775 qemu_cond_signal(&decomp_done_cond);
3776 qemu_mutex_unlock(&decomp_done_lock);
3777
3778 qemu_mutex_lock(&param->mutex);
3779 } else {
3780 qemu_cond_wait(&param->cond, &param->mutex);
3781 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003782 }
Liang Li33d151f2016-05-05 15:32:58 +08003783 qemu_mutex_unlock(&param->mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02003784
3785 return NULL;
3786}
3787
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003788static int wait_for_decompress_done(void)
Liang Li5533b2e2016-05-05 15:32:52 +08003789{
3790 int idx, thread_count;
3791
3792 if (!migrate_use_compression()) {
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003793 return 0;
Liang Li5533b2e2016-05-05 15:32:52 +08003794 }
3795
3796 thread_count = migrate_decompress_threads();
3797 qemu_mutex_lock(&decomp_done_lock);
3798 for (idx = 0; idx < thread_count; idx++) {
3799 while (!decomp_param[idx].done) {
3800 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
3801 }
3802 }
3803 qemu_mutex_unlock(&decomp_done_lock);
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003804 return qemu_file_get_error(decomp_file);
Liang Li5533b2e2016-05-05 15:32:52 +08003805}
3806
Juan Quintelaf0afa332017-06-28 11:52:28 +02003807static void compress_threads_load_cleanup(void)
Juan Quintela56e93d22015-05-07 19:33:31 +02003808{
3809 int i, thread_count;
3810
Juan Quintela3416ab52016-04-20 11:56:01 +02003811 if (!migrate_use_compression()) {
3812 return;
3813 }
Juan Quintela56e93d22015-05-07 19:33:31 +02003814 thread_count = migrate_decompress_threads();
3815 for (i = 0; i < thread_count; i++) {
Xiao Guangrong797ca152018-03-30 15:51:21 +08003816 /*
3817 * we use it as an indicator of whether the thread was
3818 * properly initialized or not
3819 */
3820 if (!decomp_param[i].compbuf) {
3821 break;
3822 }
3823
Juan Quintela56e93d22015-05-07 19:33:31 +02003824 qemu_mutex_lock(&decomp_param[i].mutex);
Liang Li90e56fb2016-05-05 15:32:56 +08003825 decomp_param[i].quit = true;
Juan Quintela56e93d22015-05-07 19:33:31 +02003826 qemu_cond_signal(&decomp_param[i].cond);
3827 qemu_mutex_unlock(&decomp_param[i].mutex);
3828 }
3829 for (i = 0; i < thread_count; i++) {
Xiao Guangrong797ca152018-03-30 15:51:21 +08003830 if (!decomp_param[i].compbuf) {
3831 break;
3832 }
3833
Juan Quintela56e93d22015-05-07 19:33:31 +02003834 qemu_thread_join(decompress_threads + i);
3835 qemu_mutex_destroy(&decomp_param[i].mutex);
3836 qemu_cond_destroy(&decomp_param[i].cond);
Xiao Guangrong797ca152018-03-30 15:51:21 +08003837 inflateEnd(&decomp_param[i].stream);
Juan Quintela56e93d22015-05-07 19:33:31 +02003838 g_free(decomp_param[i].compbuf);
Xiao Guangrong797ca152018-03-30 15:51:21 +08003839 decomp_param[i].compbuf = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02003840 }
3841 g_free(decompress_threads);
3842 g_free(decomp_param);
Juan Quintela56e93d22015-05-07 19:33:31 +02003843 decompress_threads = NULL;
3844 decomp_param = NULL;
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003845 decomp_file = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02003846}
3847
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003848static int compress_threads_load_setup(QEMUFile *f)
Xiao Guangrong797ca152018-03-30 15:51:21 +08003849{
3850 int i, thread_count;
3851
3852 if (!migrate_use_compression()) {
3853 return 0;
3854 }
3855
3856 thread_count = migrate_decompress_threads();
3857 decompress_threads = g_new0(QemuThread, thread_count);
3858 decomp_param = g_new0(DecompressParam, thread_count);
3859 qemu_mutex_init(&decomp_done_lock);
3860 qemu_cond_init(&decomp_done_cond);
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003861 decomp_file = f;
Xiao Guangrong797ca152018-03-30 15:51:21 +08003862 for (i = 0; i < thread_count; i++) {
3863 if (inflateInit(&decomp_param[i].stream) != Z_OK) {
3864 goto exit;
3865 }
3866
3867 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
3868 qemu_mutex_init(&decomp_param[i].mutex);
3869 qemu_cond_init(&decomp_param[i].cond);
3870 decomp_param[i].done = true;
3871 decomp_param[i].quit = false;
3872 qemu_thread_create(decompress_threads + i, "decompress",
3873 do_data_decompress, decomp_param + i,
3874 QEMU_THREAD_JOINABLE);
3875 }
3876 return 0;
3877exit:
3878 compress_threads_load_cleanup();
3879 return -1;
3880}
3881
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00003882static void decompress_data_with_multi_threads(QEMUFile *f,
Juan Quintela56e93d22015-05-07 19:33:31 +02003883 void *host, int len)
3884{
3885 int idx, thread_count;
3886
3887 thread_count = migrate_decompress_threads();
Liang Li73a89122016-05-05 15:32:51 +08003888 qemu_mutex_lock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02003889 while (true) {
3890 for (idx = 0; idx < thread_count; idx++) {
Liang Li73a89122016-05-05 15:32:51 +08003891 if (decomp_param[idx].done) {
Liang Li33d151f2016-05-05 15:32:58 +08003892 decomp_param[idx].done = false;
3893 qemu_mutex_lock(&decomp_param[idx].mutex);
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00003894 qemu_get_buffer(f, decomp_param[idx].compbuf, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02003895 decomp_param[idx].des = host;
3896 decomp_param[idx].len = len;
Liang Li33d151f2016-05-05 15:32:58 +08003897 qemu_cond_signal(&decomp_param[idx].cond);
3898 qemu_mutex_unlock(&decomp_param[idx].mutex);
Juan Quintela56e93d22015-05-07 19:33:31 +02003899 break;
3900 }
3901 }
3902 if (idx < thread_count) {
3903 break;
Liang Li73a89122016-05-05 15:32:51 +08003904 } else {
3905 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02003906 }
3907 }
Liang Li73a89122016-05-05 15:32:51 +08003908 qemu_mutex_unlock(&decomp_done_lock);
Juan Quintela56e93d22015-05-07 19:33:31 +02003909}
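
/*
 * The dispatch loop above generalizes to the classic "find an idle
 * worker or wait" pattern.  The sketch below restates it with plain
 * POSIX threads for illustration only; example_worker_t,
 * example_dispatch() and the *_done_* globals are assumptions made for
 * the sketch, not QEMU code.
 */
#include <pthread.h>

typedef struct {
    pthread_mutex_t mutex;
    pthread_cond_t cond;
    bool done;
    void *job;
} example_worker_t;

static pthread_mutex_t example_done_lock = PTHREAD_MUTEX_INITIALIZER;
static pthread_cond_t example_done_cond = PTHREAD_COND_INITIALIZER;

static void example_dispatch(example_worker_t *workers, int count, void *job)
{
    int idx;

    pthread_mutex_lock(&example_done_lock);
    for (;;) {
        for (idx = 0; idx < count; idx++) {
            if (workers[idx].done) {
                /* claim the idle worker and hand it the job */
                workers[idx].done = false;
                pthread_mutex_lock(&workers[idx].mutex);
                workers[idx].job = job;
                pthread_cond_signal(&workers[idx].cond);
                pthread_mutex_unlock(&workers[idx].mutex);
                break;
            }
        }
        if (idx < count) {
            break;
        }
        /* everyone busy: sleep until some worker flags itself done */
        pthread_cond_wait(&example_done_cond, &example_done_lock);
    }
    pthread_mutex_unlock(&example_done_lock);
}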
3910
Zhang Chen13af18f2018-09-03 12:38:48 +08003911/*
3912 * colo cache: this is for the secondary VM, we cache the whole
3913 * memory of the secondary VM; the caller must hold the global lock
3914 * to call this helper.
3915 */
3916int colo_init_ram_cache(void)
3917{
3918 RAMBlock *block;
3919
Paolo Bonzini44901b52019-12-13 15:07:22 +01003920 WITH_RCU_READ_LOCK_GUARD() {
3921 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3922 block->colo_cache = qemu_anon_ram_alloc(block->used_length,
3923 NULL,
3924 false);
3925 if (!block->colo_cache) {
3926 error_report("%s: Can't alloc memory for COLO cache of block %s,"
3927 " size 0x" RAM_ADDR_FMT, __func__, block->idstr,
3928 block->used_length);
3929 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3930 if (block->colo_cache) {
3931 qemu_anon_ram_free(block->colo_cache, block->used_length);
3932 block->colo_cache = NULL;
3933 }
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003934 }
Paolo Bonzini44901b52019-12-13 15:07:22 +01003935 return -errno;
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003936 }
Paolo Bonzini44901b52019-12-13 15:07:22 +01003937 memcpy(block->colo_cache, block->host, block->used_length);
Zhang Chen13af18f2018-09-03 12:38:48 +08003938 }
Zhang Chen13af18f2018-09-03 12:38:48 +08003939 }
Paolo Bonzini44901b52019-12-13 15:07:22 +01003940
Zhang Chen7d9acaf2018-09-03 12:38:49 +08003941 /*
3942 * Record the dirty pages sent by the PVM; we use this dirty bitmap to decide
3943 * which pages in the cache should be flushed into the SVM's RAM. Here
3944 * we use the same name 'ram_bitmap' as for migration.
3945 */
3946 if (ram_bytes_total()) {
3947 RAMBlock *block;
3948
Yury Kotovfbd162e2019-02-15 20:45:46 +03003949 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Zhang Chen7d9acaf2018-09-03 12:38:49 +08003950 unsigned long pages = block->max_length >> TARGET_PAGE_BITS;
3951
3952 block->bmap = bitmap_new(pages);
3953 bitmap_set(block->bmap, 0, pages);
3954 }
3955 }
3956 ram_state = g_new0(RAMState, 1);
3957 ram_state->migration_dirty_pages = 0;
Zhang Chenc6e5baf2019-03-30 06:29:51 +08003958 qemu_mutex_init(&ram_state->bitmap_mutex);
zhanghailiangd1955d22018-09-03 12:38:55 +08003959 memory_global_dirty_log_start();
Zhang Chen7d9acaf2018-09-03 12:38:49 +08003960
Zhang Chen13af18f2018-09-03 12:38:48 +08003961 return 0;
Zhang Chen13af18f2018-09-03 12:38:48 +08003962}
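
/*
 * Back-of-the-envelope sketch (illustration only; the block size and the
 * 4 KiB target page are assumptions for the example) of the bitmap cost
 * paid by colo_init_ram_cache() above: one bit per target page, so a
 * 4 GiB block with 4 KiB pages needs 1 Mi bits, i.e. 128 KiB.
 */
static uint64_t example_colo_bitmap_bytes(uint64_t block_bytes,
                                          unsigned target_page_bits)
{
    uint64_t pages = block_bytes >> target_page_bits;   /* bits needed */

    return (pages + 7) / 8;                             /* bytes needed */
}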
3963
3964/* The caller must hold the global lock to call this helper */
3965void colo_release_ram_cache(void)
3966{
3967 RAMBlock *block;
3968
zhanghailiangd1955d22018-09-03 12:38:55 +08003969 memory_global_dirty_log_stop();
Yury Kotovfbd162e2019-02-15 20:45:46 +03003970 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Zhang Chen7d9acaf2018-09-03 12:38:49 +08003971 g_free(block->bmap);
3972 block->bmap = NULL;
3973 }
3974
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01003975 WITH_RCU_READ_LOCK_GUARD() {
3976 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
3977 if (block->colo_cache) {
3978 qemu_anon_ram_free(block->colo_cache, block->used_length);
3979 block->colo_cache = NULL;
3980 }
Zhang Chen13af18f2018-09-03 12:38:48 +08003981 }
3982 }
Zhang Chenc6e5baf2019-03-30 06:29:51 +08003983 qemu_mutex_destroy(&ram_state->bitmap_mutex);
Zhang Chen7d9acaf2018-09-03 12:38:49 +08003984 g_free(ram_state);
3985 ram_state = NULL;
Zhang Chen13af18f2018-09-03 12:38:48 +08003986}
3987
Juan Quintela3d0684b2017-03-23 15:06:39 +01003988/**
Juan Quintelaf265e0e2017-06-28 11:52:27 +02003989 * ram_load_setup: Setup RAM for migration incoming side
3990 *
3991 * Returns zero to indicate success and negative for error
3992 *
3993 * @f: QEMUFile where to receive the data
3994 * @opaque: RAMState pointer
3995 */
3996static int ram_load_setup(QEMUFile *f, void *opaque)
3997{
Xiao Guangrong34ab9e92018-03-30 15:51:22 +08003998 if (compress_threads_load_setup(f)) {
Xiao Guangrong797ca152018-03-30 15:51:21 +08003999 return -1;
4000 }
4001
Juan Quintelaf265e0e2017-06-28 11:52:27 +02004002 xbzrle_load_setup();
Alexey Perevalovf9494612017-10-05 14:13:20 +03004003 ramblock_recv_map_init();
Zhang Chen13af18f2018-09-03 12:38:48 +08004004
Juan Quintelaf265e0e2017-06-28 11:52:27 +02004005 return 0;
4006}
4007
4008static int ram_load_cleanup(void *opaque)
4009{
Alexey Perevalovf9494612017-10-05 14:13:20 +03004010 RAMBlock *rb;
Junyan He56eb90a2018-07-18 15:48:03 +08004011
Yury Kotovfbd162e2019-02-15 20:45:46 +03004012 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
Beata Michalskabd108a42019-11-21 00:08:42 +00004013 qemu_ram_block_writeback(rb);
Junyan He56eb90a2018-07-18 15:48:03 +08004014 }
4015
Juan Quintelaf265e0e2017-06-28 11:52:27 +02004016 xbzrle_load_cleanup();
Juan Quintelaf0afa332017-06-28 11:52:28 +02004017 compress_threads_load_cleanup();
Alexey Perevalovf9494612017-10-05 14:13:20 +03004018
Yury Kotovfbd162e2019-02-15 20:45:46 +03004019 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
Alexey Perevalovf9494612017-10-05 14:13:20 +03004020 g_free(rb->receivedmap);
4021 rb->receivedmap = NULL;
4022 }
Zhang Chen13af18f2018-09-03 12:38:48 +08004023
Juan Quintelaf265e0e2017-06-28 11:52:27 +02004024 return 0;
4025}
4026
4027/**
Juan Quintela3d0684b2017-03-23 15:06:39 +01004028 * ram_postcopy_incoming_init: allocate postcopy data structures
4029 *
4030 * Returns 0 for success and negative if there was one error
4031 *
4032 * @mis: current migration incoming state
4033 *
4034 * Allocate data structures etc needed by incoming migration with
4035 * postcopy-ram. postcopy-ram's similarly named
4036 * postcopy_ram_incoming_init does the work.
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00004037 */
4038int ram_postcopy_incoming_init(MigrationIncomingState *mis)
4039{
David Hildenbrandc1361802018-06-20 22:27:36 +02004040 return postcopy_ram_incoming_init(mis);
Dr. David Alan Gilbert1caddf82015-11-05 18:11:03 +00004041}
4042
Juan Quintela3d0684b2017-03-23 15:06:39 +01004043/**
4044 * ram_load_postcopy: load a page in postcopy case
4045 *
4046 * Returns 0 for success or -errno in case of error
4047 *
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004048 * Called in postcopy mode by ram_load().
4049 * rcu_read_lock is taken prior to this being called.
Juan Quintela3d0684b2017-03-23 15:06:39 +01004050 *
4051 * @f: QEMUFile to receive the data from
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004052 */
4053static int ram_load_postcopy(QEMUFile *f)
4054{
4055 int flags = 0, ret = 0;
4056 bool place_needed = false;
Peter Xu1aa83672018-07-10 17:18:53 +08004057 bool matches_target_page_size = false;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004058 MigrationIncomingState *mis = migration_incoming_get_current();
4059 /* Temporary page that is later 'placed' */
Wei Yang34143222019-10-05 21:50:20 +08004060 void *postcopy_host_page = mis->postcopy_tmp_page;
Wei Yang91ba4422019-11-07 20:39:06 +08004061 void *this_host = NULL;
Dr. David Alan Gilberta3b6ff62015-11-11 14:02:28 +00004062 bool all_zero = false;
Wei Yang4cbb3c62019-11-07 20:39:04 +08004063 int target_pages = 0;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004064
4065 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
4066 ram_addr_t addr;
4067 void *host = NULL;
4068 void *page_buffer = NULL;
4069 void *place_source = NULL;
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00004070 RAMBlock *block = NULL;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004071 uint8_t ch;
Wei Yang644acf92019-11-07 20:39:07 +08004072 int len;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004073
4074 addr = qemu_get_be64(f);
Peter Xu7a9ddfb2018-02-08 18:31:05 +08004075
4076 /*
4077 * If qemu file error, we should stop here, and then "addr"
4078 * may be invalid
4079 */
4080 ret = qemu_file_get_error(f);
4081 if (ret) {
4082 break;
4083 }
4084
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004085 flags = addr & ~TARGET_PAGE_MASK;
4086 addr &= TARGET_PAGE_MASK;
4087
4088 trace_ram_load_postcopy_loop((uint64_t)addr, flags);
4089 place_needed = false;
Wei Yang644acf92019-11-07 20:39:07 +08004090 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
4091 RAM_SAVE_FLAG_COMPRESS_PAGE)) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00004092 block = ram_block_from_stream(f, flags);
zhanghailiang4c4bad42016-01-15 11:37:41 +08004093
4094 host = host_from_ram_block_offset(block, addr);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004095 if (!host) {
4096 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4097 ret = -EINVAL;
4098 break;
4099 }
Wei Yang4cbb3c62019-11-07 20:39:04 +08004100 target_pages++;
Peter Xu1aa83672018-07-10 17:18:53 +08004101 matches_target_page_size = block->page_size == TARGET_PAGE_SIZE;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004102 /*
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00004103 * Postcopy requires that we place whole host pages atomically;
4104 * these may be huge pages for RAMBlocks that are backed by
4105 * hugetlbfs.
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004106 * To make it atomic, the data is read into a temporary page
4107 * that's moved into place later.
4108 * The migration protocol uses, possibly smaller, target pages;
4109 * however, the source ensures it always sends all the components
Wei Yang91ba4422019-11-07 20:39:06 +08004110 * of a host page in one chunk.
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004111 */
4112 page_buffer = postcopy_host_page +
Dr. David Alan Gilbert28abd202017-02-24 18:28:37 +00004113 ((uintptr_t)host & (block->page_size - 1));
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004114 /* If all TP are zero then we can optimise the place */
Wei Yange5e73b02019-11-07 20:39:05 +08004115 if (target_pages == 1) {
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004116 all_zero = true;
Wei Yang91ba4422019-11-07 20:39:06 +08004117 this_host = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
4118 block->page_size);
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00004119 } else {
4120 /* not the 1st TP within the HP */
Wei Yang91ba4422019-11-07 20:39:06 +08004121 if (QEMU_ALIGN_DOWN((uintptr_t)host, block->page_size) !=
4122 (uintptr_t)this_host) {
4123 error_report("Non-same host page %p/%p",
4124 host, this_host);
Dr. David Alan Gilbertc53b7dd2015-11-05 18:11:12 +00004125 ret = -EINVAL;
4126 break;
4127 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004128 }
4129
4130 /*
4131 * If it's the last part of a host page then we place the host
4132 * page
4133 */
Wei Yang4cbb3c62019-11-07 20:39:04 +08004134 if (target_pages == (block->page_size / TARGET_PAGE_SIZE)) {
4135 place_needed = true;
4136 target_pages = 0;
4137 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004138 place_source = postcopy_host_page;
4139 }
4140
4141 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
Juan Quintelabb890ed2017-04-28 09:39:55 +02004142 case RAM_SAVE_FLAG_ZERO:
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004143 ch = qemu_get_byte(f);
Wei Yang2e36bc12019-11-07 20:39:02 +08004144 /*
4145 * We can skip setting page_buffer when this is a zero page
4146 * and (block->page_size == TARGET_PAGE_SIZE).
4147 */
4148 if (ch || !matches_target_page_size) {
4149 memset(page_buffer, ch, TARGET_PAGE_SIZE);
4150 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004151 if (ch) {
4152 all_zero = false;
4153 }
4154 break;
4155
4156 case RAM_SAVE_FLAG_PAGE:
4157 all_zero = false;
Peter Xu1aa83672018-07-10 17:18:53 +08004158 if (!matches_target_page_size) {
4159 /* For huge pages, we always use temporary buffer */
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004160 qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
4161 } else {
Peter Xu1aa83672018-07-10 17:18:53 +08004162 /*
4163 * For small pages that matches target page size, we
4164 * avoid the qemu_file copy. Instead we directly use
4165 * the buffer of QEMUFile to place the page. Note: we
4166 * cannot do any QEMUFile operation before using that
4167 * buffer to make sure the buffer is valid when
4168 * placing the page.
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004169 */
4170 qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
4171 TARGET_PAGE_SIZE);
4172 }
4173 break;
Wei Yang644acf92019-11-07 20:39:07 +08004174 case RAM_SAVE_FLAG_COMPRESS_PAGE:
4175 all_zero = false;
4176 len = qemu_get_be32(f);
4177 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4178 error_report("Invalid compressed data length: %d", len);
4179 ret = -EINVAL;
4180 break;
4181 }
4182 decompress_data_with_multi_threads(f, page_buffer, len);
4183 break;
4184
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004185 case RAM_SAVE_FLAG_EOS:
4186 /* normal exit */
Juan Quintela6df264a2018-02-28 09:10:07 +01004187 multifd_recv_sync_main();
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004188 break;
4189 default:
4190 error_report("Unknown combination of migration flags: %#x"
4191 " (postcopy mode)", flags);
4192 ret = -EINVAL;
Peter Xu7a9ddfb2018-02-08 18:31:05 +08004193 break;
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004194 }
4195
Wei Yang644acf92019-11-07 20:39:07 +08004196 /* Got the whole host page, wait for decompress before placing. */
4197 if (place_needed) {
4198 ret |= wait_for_decompress_done();
4199 }
4200
Peter Xu7a9ddfb2018-02-08 18:31:05 +08004201 /* Detect for any possible file errors */
4202 if (!ret && qemu_file_get_error(f)) {
4203 ret = qemu_file_get_error(f);
4204 }
4205
4206 if (!ret && place_needed) {
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004207 /* This gets called at the last target page in the host page */
Wei Yang91ba4422019-11-07 20:39:06 +08004208 void *place_dest = (void *)QEMU_ALIGN_DOWN((uintptr_t)host,
4209 block->page_size);
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00004210
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004211 if (all_zero) {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00004212 ret = postcopy_place_page_zero(mis, place_dest,
Alexey Perevalov8be46202017-10-05 14:13:18 +03004213 block);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004214 } else {
Dr. David Alan Gilbertdf9ff5e2017-02-24 18:28:35 +00004215 ret = postcopy_place_page(mis, place_dest,
Alexey Perevalov8be46202017-10-05 14:13:18 +03004216 place_source, block);
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004217 }
4218 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004219 }
4220
4221 return ret;
4222}
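
/*
 * Illustration only (example_place_offsets() is an assumed name): the
 * pointer arithmetic used above when a huge host page is reassembled
 * from TARGET_PAGE_SIZE pieces.  Each incoming target page is copied
 * into the temporary host page at its offset within that host page;
 * once the last piece arrives, the whole buffer is placed at the
 * aligned destination address.  host_page_size must be a power of two.
 */
static void example_place_offsets(uintptr_t host, uintptr_t host_page_size,
                                  uintptr_t *place_dest, uintptr_t *buf_offset)
{
    *buf_offset = host & (host_page_size - 1);              /* within page */
    *place_dest = host & ~(host_page_size - 1);             /* page start  */
}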
4223
Daniel Henrique Barbozaacab30b2017-11-16 20:35:26 -02004224static bool postcopy_is_advised(void)
4225{
4226 PostcopyState ps = postcopy_state_get();
4227 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
4228}
4229
4230static bool postcopy_is_running(void)
4231{
4232 PostcopyState ps = postcopy_state_get();
4233 return ps >= POSTCOPY_INCOMING_LISTENING && ps < POSTCOPY_INCOMING_END;
4234}
4235
Zhang Chene6f4aa12018-09-03 12:38:50 +08004236/*
4237 * Flush content of RAM cache into SVM's memory.
4238 * Only flush the pages that be dirtied by PVM or SVM or both.
4239 */
4240static void colo_flush_ram_cache(void)
4241{
4242 RAMBlock *block = NULL;
4243 void *dst_host;
4244 void *src_host;
4245 unsigned long offset = 0;
4246
zhanghailiangd1955d22018-09-03 12:38:55 +08004247 memory_global_dirty_log_sync();
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01004248 WITH_RCU_READ_LOCK_GUARD() {
4249 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
4250 ramblock_sync_dirty_bitmap(ram_state, block);
Zhang Chene6f4aa12018-09-03 12:38:50 +08004251 }
4252 }
4253
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01004254 trace_colo_flush_ram_cache_begin(ram_state->migration_dirty_pages);
4255 WITH_RCU_READ_LOCK_GUARD() {
4256 block = QLIST_FIRST_RCU(&ram_list.blocks);
4257
4258 while (block) {
4259 offset = migration_bitmap_find_dirty(ram_state, block, offset);
4260
4261 if (offset << TARGET_PAGE_BITS >= block->used_length) {
4262 offset = 0;
4263 block = QLIST_NEXT_RCU(block, next);
4264 } else {
4265 migration_bitmap_clear_dirty(ram_state, block, offset);
4266 dst_host = block->host + (offset << TARGET_PAGE_BITS);
4267 src_host = block->colo_cache + (offset << TARGET_PAGE_BITS);
4268 memcpy(dst_host, src_host, TARGET_PAGE_SIZE);
4269 }
4270 }
4271 }
Zhang Chene6f4aa12018-09-03 12:38:50 +08004272 trace_colo_flush_ram_cache_end();
4273}
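
/*
 * Plain-C sketch, for illustration only (not QEMU's bitmap helpers), of
 * the walk that drives the flush loop above: return the index of the
 * next dirty bit at or after 'start', or 'nbits' when none is left.
 */
static unsigned long example_find_next_dirty(const unsigned long *bmap,
                                             unsigned long nbits,
                                             unsigned long start)
{
    const unsigned long bits_per_word = sizeof(unsigned long) * 8;
    unsigned long i;

    for (i = start; i < nbits; i++) {
        if (bmap[i / bits_per_word] & (1UL << (i % bits_per_word))) {
            break;
        }
    }
    return i;
}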
4274
Wei Yang10da4a32019-07-25 08:20:23 +08004275/**
4276 * ram_load_precopy: load pages in precopy case
4277 *
4278 * Returns 0 for success or -errno in case of error
4279 *
4280 * Called in precopy mode by ram_load().
4281 * rcu_read_lock is taken prior to this being called.
4282 *
4283 * @f: QEMUFile to read the data from
4284 */
4285static int ram_load_precopy(QEMUFile *f)
Juan Quintela56e93d22015-05-07 19:33:31 +02004286{
Yury Kotove65cec52019-11-25 16:36:32 +03004287 int flags = 0, ret = 0, invalid_flags = 0, len = 0, i = 0;
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00004288 /* ADVISE is earlier, it shows the source has the postcopy capability on */
Daniel Henrique Barbozaacab30b2017-11-16 20:35:26 -02004289 bool postcopy_advised = postcopy_is_advised();
Juan Quintelaedc60122016-11-02 12:40:46 +01004290 if (!migrate_use_compression()) {
4291 invalid_flags |= RAM_SAVE_FLAG_COMPRESS_PAGE;
4292 }
Dr. David Alan Gilberta7180872015-11-05 18:11:11 +00004293
Wei Yang10da4a32019-07-25 08:20:23 +08004294 while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
Juan Quintela56e93d22015-05-07 19:33:31 +02004295 ram_addr_t addr, total_ram_bytes;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004296 void *host = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02004297 uint8_t ch;
4298
Yury Kotove65cec52019-11-25 16:36:32 +03004299 /*
4300 * Yield periodically to let the main loop run, but an iteration of
4301 * the main loop is expensive, so only do it every so many iterations
4302 */
4303 if ((i & 32767) == 0 && qemu_in_coroutine()) {
4304 aio_co_schedule(qemu_get_current_aio_context(),
4305 qemu_coroutine_self());
4306 qemu_coroutine_yield();
4307 }
4308 i++;
4309
Juan Quintela56e93d22015-05-07 19:33:31 +02004310 addr = qemu_get_be64(f);
4311 flags = addr & ~TARGET_PAGE_MASK;
4312 addr &= TARGET_PAGE_MASK;
4313
Juan Quintelaedc60122016-11-02 12:40:46 +01004314 if (flags & invalid_flags) {
4315 if (flags & invalid_flags & RAM_SAVE_FLAG_COMPRESS_PAGE) {
4316 error_report("Received an unexpected compressed page");
4317 }
4318
4319 ret = -EINVAL;
4320 break;
4321 }
4322
Juan Quintelabb890ed2017-04-28 09:39:55 +02004323 if (flags & (RAM_SAVE_FLAG_ZERO | RAM_SAVE_FLAG_PAGE |
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004324 RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
zhanghailiang4c4bad42016-01-15 11:37:41 +08004325 RAMBlock *block = ram_block_from_stream(f, flags);
4326
Zhang Chen13af18f2018-09-03 12:38:48 +08004327 /*
4328 * After going into COLO, we should load the page into colo_cache.
4329 */
4330 if (migration_incoming_in_colo_state()) {
4331 host = colo_cache_from_block_offset(block, addr);
4332 } else {
4333 host = host_from_ram_block_offset(block, addr);
4334 }
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004335 if (!host) {
4336 error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
4337 ret = -EINVAL;
4338 break;
4339 }
Zhang Chen13af18f2018-09-03 12:38:48 +08004340
4341 if (!migration_incoming_in_colo_state()) {
4342 ramblock_recv_bitmap_set(block, host);
4343 }
4344
Dr. David Alan Gilbert1db9d8e2017-04-26 19:37:21 +01004345 trace_ram_load_loop(block->idstr, (uint64_t)addr, flags, host);
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004346 }
4347
Juan Quintela56e93d22015-05-07 19:33:31 +02004348 switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
4349 case RAM_SAVE_FLAG_MEM_SIZE:
4350 /* Synchronize RAM block list */
4351 total_ram_bytes = addr;
4352 while (!ret && total_ram_bytes) {
4353 RAMBlock *block;
Juan Quintela56e93d22015-05-07 19:33:31 +02004354 char id[256];
4355 ram_addr_t length;
4356
4357 len = qemu_get_byte(f);
4358 qemu_get_buffer(f, (uint8_t *)id, len);
4359 id[len] = 0;
4360 length = qemu_get_be64(f);
4361
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00004362 block = qemu_ram_block_by_name(id);
Cédric Le Goaterb895de52018-05-14 08:57:00 +02004363 if (block && !qemu_ram_is_migratable(block)) {
4364 error_report("block %s should not be migrated!", id);
4365 ret = -EINVAL;
4366 } else if (block) {
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00004367 if (length != block->used_length) {
4368 Error *local_err = NULL;
Juan Quintela56e93d22015-05-07 19:33:31 +02004369
Gongleifa53a0e2016-05-10 10:04:59 +08004370 ret = qemu_ram_resize(block, length,
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00004371 &local_err);
4372 if (local_err) {
4373 error_report_err(local_err);
Juan Quintela56e93d22015-05-07 19:33:31 +02004374 }
Juan Quintela56e93d22015-05-07 19:33:31 +02004375 }
Dr. David Alan Gilbertef08fb32017-02-24 18:28:30 +00004376 /* For postcopy we need to check hugepage sizes match */
4377 if (postcopy_advised &&
4378 block->page_size != qemu_host_page_size) {
4379 uint64_t remote_page_size = qemu_get_be64(f);
4380 if (remote_page_size != block->page_size) {
4381 error_report("Mismatched RAM page size %s "
4382 "(local) %zd != %" PRId64,
4383 id, block->page_size,
4384 remote_page_size);
4385 ret = -EINVAL;
4386 }
4387 }
Yury Kotovfbd162e2019-02-15 20:45:46 +03004388 if (migrate_ignore_shared()) {
4389 hwaddr addr = qemu_get_be64(f);
Yury Kotovfbd162e2019-02-15 20:45:46 +03004390 if (ramblock_is_ignored(block) &&
4391 block->mr->addr != addr) {
4392 error_report("Mismatched GPAs for block %s "
4393 "%" PRId64 " != %" PRId64,
4394 id, (uint64_t)addr,
4395 (uint64_t)block->mr->addr);
4396 ret = -EINVAL;
4397 }
4398 }
Dr. David Alan Gilberte3dd7492015-11-05 18:10:33 +00004399 ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
4400 block->idstr);
4401 } else {
Juan Quintela56e93d22015-05-07 19:33:31 +02004402 error_report("Unknown ramblock \"%s\", cannot "
4403 "accept migration", id);
4404 ret = -EINVAL;
4405 }
4406
4407 total_ram_bytes -= length;
4408 }
4409 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004410
Juan Quintelabb890ed2017-04-28 09:39:55 +02004411 case RAM_SAVE_FLAG_ZERO:
Juan Quintela56e93d22015-05-07 19:33:31 +02004412 ch = qemu_get_byte(f);
4413 ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
4414 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004415
Juan Quintela56e93d22015-05-07 19:33:31 +02004416 case RAM_SAVE_FLAG_PAGE:
Juan Quintela56e93d22015-05-07 19:33:31 +02004417 qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
4418 break;
Juan Quintela56e93d22015-05-07 19:33:31 +02004419
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004420 case RAM_SAVE_FLAG_COMPRESS_PAGE:
Juan Quintela56e93d22015-05-07 19:33:31 +02004421 len = qemu_get_be32(f);
4422 if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
4423 error_report("Invalid compressed data length: %d", len);
4424 ret = -EINVAL;
4425 break;
4426 }
Dr. David Alan Gilbertc1bc6622015-12-16 11:47:38 +00004427 decompress_data_with_multi_threads(f, host, len);
Juan Quintela56e93d22015-05-07 19:33:31 +02004428 break;
Dr. David Alan Gilberta776aa12015-11-05 18:10:39 +00004429
Juan Quintela56e93d22015-05-07 19:33:31 +02004430 case RAM_SAVE_FLAG_XBZRLE:
Juan Quintela56e93d22015-05-07 19:33:31 +02004431 if (load_xbzrle(f, addr, host) < 0) {
4432 error_report("Failed to decompress XBZRLE page at "
4433 RAM_ADDR_FMT, addr);
4434 ret = -EINVAL;
4435 break;
4436 }
4437 break;
4438 case RAM_SAVE_FLAG_EOS:
4439 /* normal exit */
Juan Quintela6df264a2018-02-28 09:10:07 +01004440 multifd_recv_sync_main();
Juan Quintela56e93d22015-05-07 19:33:31 +02004441 break;
4442 default:
4443 if (flags & RAM_SAVE_FLAG_HOOK) {
Dr. David Alan Gilbert632e3a52015-06-11 18:17:23 +01004444 ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
Juan Quintela56e93d22015-05-07 19:33:31 +02004445 } else {
4446 error_report("Unknown combination of migration flags: %#x",
4447 flags);
4448 ret = -EINVAL;
4449 }
4450 }
4451 if (!ret) {
4452 ret = qemu_file_get_error(f);
4453 }
4454 }
4455
Wei Yangca1a6b72019-11-07 20:39:03 +08004456 ret |= wait_for_decompress_done();
Wei Yang10da4a32019-07-25 08:20:23 +08004457 return ret;
4458}
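
/*
 * Illustration only (the example_* names are assumptions): the header
 * word parsed at the top of the precopy loop above packs the
 * page-aligned guest address and the RAM_SAVE_FLAG_* bits into one
 * be64, because the flags all fit in the sub-page bits masked off by
 * TARGET_PAGE_MASK.
 */
static uint64_t example_pack_header(uint64_t page_aligned_addr, uint64_t flags)
{
    return page_aligned_addr | flags;       /* flags live below page size */
}

static void example_unpack_header(uint64_t word, uint64_t page_mask,
                                  uint64_t *addr, uint64_t *flags)
{
    *flags = word & ~page_mask;
    *addr = word & page_mask;
}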
4459
4460static int ram_load(QEMUFile *f, void *opaque, int version_id)
4461{
4462 int ret = 0;
4463 static uint64_t seq_iter;
4464 /*
4465 * If system is running in postcopy mode, page inserts to host memory must
4466 * be atomic
4467 */
4468 bool postcopy_running = postcopy_is_running();
4469
4470 seq_iter++;
4471
4472 if (version_id != 4) {
4473 return -EINVAL;
4474 }
4475
4476 /*
4477 * This RCU critical section can be very long running.
4478 * When RCU reclaims in the code become numerous,
4479 * it will be necessary to reduce the granularity of this
4480 * critical section.
4481 */
Dr. David Alan Gilbert89ac5a12019-10-07 15:36:39 +01004482 WITH_RCU_READ_LOCK_GUARD() {
4483 if (postcopy_running) {
4484 ret = ram_load_postcopy(f);
4485 } else {
4486 ret = ram_load_precopy(f);
4487 }
Wei Yang10da4a32019-07-25 08:20:23 +08004488 }
Juan Quintela55c44462017-01-23 22:32:05 +01004489 trace_ram_load_complete(ret, seq_iter);
Zhang Chene6f4aa12018-09-03 12:38:50 +08004490
4491 if (!ret && migration_incoming_in_colo_state()) {
4492 colo_flush_ram_cache();
4493 }
Juan Quintela56e93d22015-05-07 19:33:31 +02004494 return ret;
4495}
4496
Vladimir Sementsov-Ogievskiyc6467622017-07-10 19:30:14 +03004497static bool ram_has_postcopy(void *opaque)
4498{
Junyan He469dd512018-07-18 15:48:02 +08004499 RAMBlock *rb;
Yury Kotovfbd162e2019-02-15 20:45:46 +03004500 RAMBLOCK_FOREACH_NOT_IGNORED(rb) {
Junyan He469dd512018-07-18 15:48:02 +08004501 if (ramblock_is_pmem(rb)) {
4502 info_report("Block: %s, host: %p is an nvdimm memory, postcopy"
4503 " is not supported now!", rb->idstr, rb->host);
4504 return false;
4505 }
4506 }
4507
Vladimir Sementsov-Ogievskiyc6467622017-07-10 19:30:14 +03004508 return migrate_postcopy_ram();
4509}
4510
Peter Xuedd090c2018-05-02 18:47:32 +08004511/* Sync all the dirty bitmap with destination VM. */
4512static int ram_dirty_bitmap_sync_all(MigrationState *s, RAMState *rs)
4513{
4514 RAMBlock *block;
4515 QEMUFile *file = s->to_dst_file;
4516 int ramblock_count = 0;
4517
4518 trace_ram_dirty_bitmap_sync_start();
4519
Yury Kotovfbd162e2019-02-15 20:45:46 +03004520 RAMBLOCK_FOREACH_NOT_IGNORED(block) {
Peter Xuedd090c2018-05-02 18:47:32 +08004521 qemu_savevm_send_recv_bitmap(file, block->idstr);
4522 trace_ram_dirty_bitmap_request(block->idstr);
4523 ramblock_count++;
4524 }
4525
4526 trace_ram_dirty_bitmap_sync_wait();
4527
4528 /* Wait until all the ramblocks' dirty bitmap synced */
4529 while (ramblock_count--) {
4530 qemu_sem_wait(&s->rp_state.rp_sem);
4531 }
4532
4533 trace_ram_dirty_bitmap_sync_complete();
4534
4535 return 0;
4536}
4537
4538static void ram_dirty_bitmap_reload_notify(MigrationState *s)
4539{
4540 qemu_sem_post(&s->rp_state.rp_sem);
4541}
4542
Peter Xua335deb2018-05-02 18:47:28 +08004543/*
4544 * Read the received bitmap, revert it as the initial dirty bitmap.
4545 * This is only used when the postcopy migration is paused but wants
4546 * to resume from a middle point.
4547 */
4548int ram_dirty_bitmap_reload(MigrationState *s, RAMBlock *block)
4549{
4550 int ret = -EINVAL;
4551 QEMUFile *file = s->rp_state.from_dst_file;
4552 unsigned long *le_bitmap, nbits = block->used_length >> TARGET_PAGE_BITS;
Peter Xua725ef92018-07-10 17:18:55 +08004553 uint64_t local_size = DIV_ROUND_UP(nbits, 8);
Peter Xua335deb2018-05-02 18:47:28 +08004554 uint64_t size, end_mark;
4555
4556 trace_ram_dirty_bitmap_reload_begin(block->idstr);
4557
4558 if (s->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
4559 error_report("%s: incorrect state %s", __func__,
4560 MigrationStatus_str(s->state));
4561 return -EINVAL;
4562 }
4563
4564 /*
4565 * Note: see the comments in ramblock_recv_bitmap_send() on why we
4566 * need the endianness conversion and the padding.
4567 */
4568 local_size = ROUND_UP(local_size, 8);
4569
4570 /* Add paddings */
4571 le_bitmap = bitmap_new(nbits + BITS_PER_LONG);
4572
4573 size = qemu_get_be64(file);
4574
4575 /* The size of the bitmap should match with our ramblock */
4576 if (size != local_size) {
4577 error_report("%s: ramblock '%s' bitmap size mismatch "
4578 "(0x%"PRIx64" != 0x%"PRIx64")", __func__,
4579 block->idstr, size, local_size);
4580 ret = -EINVAL;
4581 goto out;
4582 }
4583
4584 size = qemu_get_buffer(file, (uint8_t *)le_bitmap, local_size);
4585 end_mark = qemu_get_be64(file);
4586
4587 ret = qemu_file_get_error(file);
4588 if (ret || size != local_size) {
4589 error_report("%s: read bitmap failed for ramblock '%s': %d"
4590 " (size 0x%"PRIx64", got: 0x%"PRIx64")",
4591 __func__, block->idstr, ret, local_size, size);
4592 ret = -EIO;
4593 goto out;
4594 }
4595
4596 if (end_mark != RAMBLOCK_RECV_BITMAP_ENDING) {
4597 error_report("%s: ramblock '%s' end mark incorrect: 0x%"PRIu64,
4598 __func__, block->idstr, end_mark);
4599 ret = -EINVAL;
4600 goto out;
4601 }
4602
4603 /*
4604 * Endianness conversion. We are in postcopy (though paused).
4605 * The dirty bitmap won't change. We can directly modify it.
4606 */
4607 bitmap_from_le(block->bmap, le_bitmap, nbits);
4608
4609 /*
4610 * What we received is the "received bitmap". Invert it to form the initial
4611 * dirty bitmap for this ramblock.
4612 */
4613 bitmap_complement(block->bmap, block->bmap, nbits);
4614
4615 trace_ram_dirty_bitmap_reload_complete(block->idstr);
4616
Peter Xuedd090c2018-05-02 18:47:32 +08004617 /*
4618 * We succeeded in syncing the bitmap for the current ramblock. If this is
4619 * the last one to sync, we need to notify the main send thread.
4620 */
4621 ram_dirty_bitmap_reload_notify(s);
4622
Peter Xua335deb2018-05-02 18:47:28 +08004623 ret = 0;
4624out:
Peter Xubf269902018-05-25 09:50:42 +08004625 g_free(le_bitmap);
Peter Xua335deb2018-05-02 18:47:28 +08004626 return ret;
4627}
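
/*
 * Sketch of the final inversion step above, in plain C for illustration
 * only (not QEMU's bitmap_complement()): every page the destination has
 * already received is clean, everything else must be resent, so the
 * received bitmap flipped bit-for-bit becomes the new dirty bitmap.
 */
static void example_received_to_dirty(unsigned long *dirty,
                                      const unsigned long *received,
                                      unsigned long nwords)
{
    unsigned long i;

    for (i = 0; i < nwords; i++) {
        dirty[i] = ~received[i];
    }
}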
4628
Peter Xuedd090c2018-05-02 18:47:32 +08004629static int ram_resume_prepare(MigrationState *s, void *opaque)
4630{
4631 RAMState *rs = *(RAMState **)opaque;
Peter Xu08614f32018-05-02 18:47:33 +08004632 int ret;
Peter Xuedd090c2018-05-02 18:47:32 +08004633
Peter Xu08614f32018-05-02 18:47:33 +08004634 ret = ram_dirty_bitmap_sync_all(s, rs);
4635 if (ret) {
4636 return ret;
4637 }
4638
4639 ram_state_resume_prepare(rs, s->to_dst_file);
4640
4641 return 0;
Peter Xuedd090c2018-05-02 18:47:32 +08004642}
4643
Juan Quintela56e93d22015-05-07 19:33:31 +02004644static SaveVMHandlers savevm_ram_handlers = {
Juan Quintela9907e842017-06-28 11:52:24 +02004645 .save_setup = ram_save_setup,
Juan Quintela56e93d22015-05-07 19:33:31 +02004646 .save_live_iterate = ram_save_iterate,
Dr. David Alan Gilbert763c9062015-11-05 18:11:00 +00004647 .save_live_complete_postcopy = ram_save_complete,
Dr. David Alan Gilberta3e06c32015-11-05 18:10:41 +00004648 .save_live_complete_precopy = ram_save_complete,
Vladimir Sementsov-Ogievskiyc6467622017-07-10 19:30:14 +03004649 .has_postcopy = ram_has_postcopy,
Juan Quintela56e93d22015-05-07 19:33:31 +02004650 .save_live_pending = ram_save_pending,
4651 .load_state = ram_load,
Juan Quintelaf265e0e2017-06-28 11:52:27 +02004652 .save_cleanup = ram_save_cleanup,
4653 .load_setup = ram_load_setup,
4654 .load_cleanup = ram_load_cleanup,
Peter Xuedd090c2018-05-02 18:47:32 +08004655 .resume_prepare = ram_resume_prepare,
Juan Quintela56e93d22015-05-07 19:33:31 +02004656};
4657
4658void ram_mig_init(void)
4659{
4660 qemu_mutex_init(&XBZRLE.lock);
Dr. David Alan Gilbertce62df52019-08-22 12:54:33 +01004661 register_savevm_live("ram", 0, 4, &savevm_ram_handlers, &ram_state);
Juan Quintela56e93d22015-05-07 19:33:31 +02004662}