/*
 * Postcopy migration for RAM
 *
 * Copyright 2013-2015 Red Hat, Inc. and/or its affiliates
 *
 * Authors:
 *  Dave Gilbert <dgilbert@redhat.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

/*
 * Postcopy is a migration technique where the execution flips from the
 * source to the destination before all the data has been copied.
 */

#include "qemu/osdep.h"
#include "qemu/madvise.h"
#include "exec/target_page.h"
#include "migration.h"
#include "qemu-file.h"
#include "savevm.h"
#include "postcopy-ram.h"
#include "ram.h"
#include "qapi/error.h"
#include "qemu/notify.h"
#include "qemu/rcu.h"
#include "sysemu/sysemu.h"
#include "qemu/error-report.h"
#include "trace.h"
#include "hw/boards.h"
#include "exec/ramblock.h"
#include "socket.h"
#include "yank_functions.h"
#include "tls.h"
#include "qemu/userfaultfd.h"
#include "qemu/mmap-alloc.h"
#include "options.h"

/* Arbitrary limit on size of each discard command,
 * keeps them around ~200 bytes
 */
#define MAX_DISCARDS_PER_COMMAND 12

struct PostcopyDiscardState {
    const char *ramblock_name;
    uint16_t cur_entry;
    /*
     * Start and length of a discard range (bytes)
     */
    uint64_t start_list[MAX_DISCARDS_PER_COMMAND];
    uint64_t length_list[MAX_DISCARDS_PER_COMMAND];
    unsigned int nsentwords;
    unsigned int nsentcmds;
};

static NotifierWithReturnList postcopy_notifier_list;

void postcopy_infrastructure_init(void)
{
    notifier_with_return_list_init(&postcopy_notifier_list);
}

void postcopy_add_notifier(NotifierWithReturn *nn)
{
    notifier_with_return_list_add(&postcopy_notifier_list, nn);
}

void postcopy_remove_notifier(NotifierWithReturn *n)
{
    notifier_with_return_remove(n);
}

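/*
 * Fire the postcopy notifier list for @reason; a notifier can object by
 * returning non-zero (setting *errp), which is propagated to the caller.
 */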
int postcopy_notify(enum PostcopyNotifyReason reason, Error **errp)
{
    struct PostcopyNotifyData pnd;
    pnd.reason = reason;
    pnd.errp = errp;

    return notifier_with_return_list_notify(&postcopy_notifier_list,
                                            &pnd);
}

/*
 * NOTE: this routine is not thread-safe; we can't call it concurrently. But it
 * should be good enough for migration's purposes.
 */
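/*
 * The created thread is expected to post mis->thread_sync_sem once it has
 * finished its early setup (see postcopy_ram_fault_thread); we block here
 * until it does.
 */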
void postcopy_thread_create(MigrationIncomingState *mis,
                            QemuThread *thread, const char *name,
                            void *(*fn)(void *), int joinable)
{
    qemu_sem_init(&mis->thread_sync_sem, 0);
    qemu_thread_create(thread, name, fn, mis, joinable);
    qemu_sem_wait(&mis->thread_sync_sem);
    qemu_sem_destroy(&mis->thread_sync_sem);
}

/* Postcopy needs to detect accesses to pages that haven't yet been copied
 * across, and efficiently map new pages in; the techniques for doing this
 * are target OS specific.
 */
#if defined(__linux__)

#include <poll.h>
#include <sys/ioctl.h>
#include <sys/syscall.h>
#include <asm/types.h> /* for __u64 */
#endif

#if defined(__linux__) && defined(__NR_userfaultfd) && defined(CONFIG_EVENTFD)
#include <sys/eventfd.h>
#include <linux/userfaultfd.h>

typedef struct PostcopyBlocktimeContext {
    /* time when the page fault was initiated, per vCPU */
    uint32_t *page_fault_vcpu_time;
    /* faulted page address, per vCPU */
    uintptr_t *vcpu_addr;
    uint32_t total_blocktime;
    /* blocktime per vCPU */
    uint32_t *vcpu_blocktime;
    /* point in time when the last page fault was initiated */
    uint32_t last_begin;
    /* number of vCPUs that are currently suspended */
    int smp_cpus_down;
    uint64_t start_time;

    /*
     * Handler for the exit event, necessary for
     * releasing the whole blocktime_ctx
     */
    Notifier exit_notifier;
} PostcopyBlocktimeContext;

static void destroy_blocktime_context(struct PostcopyBlocktimeContext *ctx)
{
    g_free(ctx->page_fault_vcpu_time);
    g_free(ctx->vcpu_addr);
    g_free(ctx->vcpu_blocktime);
    g_free(ctx);
}

static void migration_exit_cb(Notifier *n, void *data)
{
    PostcopyBlocktimeContext *ctx = container_of(n, PostcopyBlocktimeContext,
                                                 exit_notifier);
    destroy_blocktime_context(ctx);
}

static struct PostcopyBlocktimeContext *blocktime_context_new(void)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    PostcopyBlocktimeContext *ctx = g_new0(PostcopyBlocktimeContext, 1);
    ctx->page_fault_vcpu_time = g_new0(uint32_t, smp_cpus);
    ctx->vcpu_addr = g_new0(uintptr_t, smp_cpus);
    ctx->vcpu_blocktime = g_new0(uint32_t, smp_cpus);

    ctx->exit_notifier.notify = migration_exit_cb;
    ctx->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    qemu_add_exit_notifier(&ctx->exit_notifier);
    return ctx;
}

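/*
 * Build a uint32List of the per-vCPU blocktimes; prepending from the last
 * vCPU downwards keeps the list in vCPU-index order.
 */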
static uint32List *get_vcpu_blocktime_list(PostcopyBlocktimeContext *ctx)
{
    MachineState *ms = MACHINE(qdev_get_machine());
    uint32List *list = NULL;
    int i;

    for (i = ms->smp.cpus - 1; i >= 0; i--) {
        QAPI_LIST_PREPEND(list, ctx->vcpu_blocktime[i]);
    }

    return list;
}

/*
 * This function just populates MigrationInfo from postcopy's
 * blocktime context. It will not populate MigrationInfo
 * unless the postcopy-blocktime capability was set.
 *
 * @info: pointer to MigrationInfo to populate
 */
void fill_destination_postcopy_migration_info(MigrationInfo *info)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return;
    }

    info->has_postcopy_blocktime = true;
    info->postcopy_blocktime = bc->total_blocktime;
    info->has_postcopy_vcpu_blocktime = true;
    info->postcopy_vcpu_blocktime = get_vcpu_blocktime_list(bc);
}

static uint32_t get_postcopy_total_blocktime(void)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *bc = mis->blocktime_ctx;

    if (!bc) {
        return 0;
    }

    return bc->total_blocktime;
}

/**
 * receive_ufd_features: check userfault fd features, to request only supported
 * features in the future.
 *
 * Returns: true on success
 *
 * __NR_userfaultfd should be checked to exist before calling this.
 * @features: out parameter that will contain uffdio_api.features provided by
 *            the kernel in case of success
 */
static bool receive_ufd_features(uint64_t *features)
{
    struct uffdio_api api_struct = {0};
    int ufd;
    bool ret = true;

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_report("%s: uffd_open() failed: %s", __func__, strerror(errno));
        return false;
    }

    /* ask features */
    api_struct.api = UFFD_API;
    api_struct.features = 0;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        ret = false;
        goto release_ufd;
    }

    *features = api_struct.features;

release_ufd:
    close(ufd);
    return ret;
}


/**
 * request_ufd_features: this function should be called only once on a newly
 * opened ufd; subsequent calls will lead to an error.
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from userfaultfd syscall
 * @features: bit mask, see UFFD_API_FEATURES
 */
static bool request_ufd_features(int ufd, uint64_t features)
{
    struct uffdio_api api_struct = {0};
    uint64_t ioctl_mask;

    api_struct.api = UFFD_API;
    api_struct.features = features;
    if (ioctl(ufd, UFFDIO_API, &api_struct)) {
        error_report("%s failed: UFFDIO_API failed: %s", __func__,
                     strerror(errno));
        return false;
    }

    ioctl_mask = (__u64)1 << _UFFDIO_REGISTER |
                 (__u64)1 << _UFFDIO_UNREGISTER;
    if ((api_struct.ioctls & ioctl_mask) != ioctl_mask) {
        error_report("Missing userfault features: %" PRIx64,
                     (uint64_t)(~api_struct.ioctls & ioctl_mask));
        return false;
    }

    return true;
}

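/*
 * ufd_check_and_apply: check the userfaultfd features we need against what
 * the kernel offers and apply the set we can use on @ufd (e.g. thread-id
 * reporting, needed by the postcopy-blocktime measurement).
 *
 * Returns: true on success
 *
 * @ufd: fd obtained from the userfaultfd syscall
 * @mis: incoming state; a blocktime context is allocated here if needed
 * @errp: set on failure
 */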
static bool ufd_check_and_apply(int ufd, MigrationIncomingState *mis,
                                Error **errp)
{
    uint64_t asked_features = 0;
    static uint64_t supported_features;

    ERRP_GUARD();
    /*
     * It's not possible to request UFFD_API twice per fd;
     * userfault fd features are persistent.
     */
    if (!supported_features) {
        if (!receive_ufd_features(&supported_features)) {
            error_setg(errp, "Userfault feature detection failed");
            return false;
        }
    }

#ifdef UFFD_FEATURE_THREAD_ID
    if (UFFD_FEATURE_THREAD_ID & supported_features) {
        asked_features |= UFFD_FEATURE_THREAD_ID;
        if (migrate_postcopy_blocktime()) {
            if (!mis->blocktime_ctx) {
                mis->blocktime_ctx = blocktime_context_new();
            }
        }
    }
#endif

    /*
     * Request features, even if asked_features is 0, because the
     * kernel expects UFFD_API before UFFDIO_REGISTER, per
     * userfault file descriptor.
     */
    if (!request_ufd_features(ufd, asked_features)) {
        error_setg(errp, "Failed features %" PRIu64, asked_features);
        return false;
    }

    if (qemu_real_host_page_size() != ram_pagesize_summary()) {
        bool have_hp = false;
        /* We've got a huge page */
#ifdef UFFD_FEATURE_MISSING_HUGETLBFS
        have_hp = supported_features & UFFD_FEATURE_MISSING_HUGETLBFS;
#endif
        if (!have_hp) {
            error_setg(errp,
                       "Userfault on this host does not support huge pages");
            return false;
        }
    }
    return true;
}


/* Callback from postcopy_ram_supported_by_host block iterator.
 */
static int test_ramblock_postcopiable(RAMBlock *rb, Error **errp)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    size_t pagesize = qemu_ram_pagesize(rb);
    QemuFsType fs;

    if (length % pagesize) {
        error_setg(errp,
                   "Postcopy requires RAM blocks to be a page size multiple,"
                   " block %s is 0x" RAM_ADDR_FMT " bytes with a "
                   "page size of 0x%zx", block_name, length, pagesize);
        return 1;
    }

    if (rb->fd >= 0) {
        fs = qemu_fd_getfs(rb->fd);
        if (fs != QEMU_FS_TYPE_TMPFS && fs != QEMU_FS_TYPE_HUGETLBFS) {
            error_setg(errp,
                       "Host backend files need to be TMPFS or HUGETLBFS only");
            return 1;
        }
    }

    return 0;
}


/*
 * Note: This has the side effect of munlock'ing all of RAM; that's
 * normally fine since if the postcopy succeeds it gets turned back on at the
 * end.
 */
bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp)
{
    long pagesize = qemu_real_host_page_size();
    int ufd = -1;
    bool ret = false; /* Error unless we change it */
    void *testarea = NULL;
    struct uffdio_register reg_struct;
    struct uffdio_range range_struct;
    uint64_t feature_mask;
    RAMBlock *block;

    ERRP_GUARD();
    if (qemu_target_page_size() > pagesize) {
        error_setg(errp, "Target page size bigger than host page size");
        goto out;
    }

    ufd = uffd_open(O_CLOEXEC);
    if (ufd == -1) {
        error_setg(errp, "Userfaultfd not available: %s", strerror(errno));
        goto out;
    }

    /* Give devices a chance to object */
    if (postcopy_notify(POSTCOPY_NOTIFY_PROBE, errp)) {
        goto out;
    }

    /* Version and features check */
    if (!ufd_check_and_apply(ufd, mis, errp)) {
        goto out;
    }

    /*
     * We don't support postcopy with some types of ramblocks.
     *
     * NOTE: we explicitly ignore migrate_ram_is_ignored() and instead check
     * all possible ramblocks. This is because this function can be called
     * when creating the migration object, during which phase RAM_MIGRATABLE
     * is not even properly set for all the ramblocks.
     *
     * A side effect of this is we'll also check against RAM_SHARED
     * ramblocks even if migrate_ignore_shared() is set (in which case
     * we'll never migrate RAM_SHARED at all), but normally this shouldn't
     * matter in practice, or we can revisit.
     */
    RAMBLOCK_FOREACH(block) {
        if (test_ramblock_postcopiable(block, errp)) {
            goto out;
        }
    }

    /*
     * userfault and mlock don't go together; we'll put it back later if
     * it was enabled.
     */
    if (munlockall()) {
        error_setg(errp, "munlockall() failed: %s", strerror(errno));
        goto out;
    }

    /*
     * We need to check that the ops we need are supported on anon memory.
     * To do that we need to register a chunk and see the flags that
     * are returned.
     */
    testarea = mmap(NULL, pagesize, PROT_READ | PROT_WRITE, MAP_PRIVATE |
                                    MAP_ANONYMOUS, -1, 0);
    if (testarea == MAP_FAILED) {
        error_setg(errp, "Failed to map test area: %s", strerror(errno));
        goto out;
    }
    g_assert(QEMU_PTR_IS_ALIGNED(testarea, pagesize));

    reg_struct.range.start = (uintptr_t)testarea;
    reg_struct.range.len = pagesize;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    if (ioctl(ufd, UFFDIO_REGISTER, &reg_struct)) {
        error_setg(errp, "UFFDIO_REGISTER failed: %s", strerror(errno));
        goto out;
    }

    range_struct.start = (uintptr_t)testarea;
    range_struct.len = pagesize;
    if (ioctl(ufd, UFFDIO_UNREGISTER, &range_struct)) {
        error_setg(errp, "UFFDIO_UNREGISTER failed: %s", strerror(errno));
        goto out;
    }

    feature_mask = (__u64)1 << _UFFDIO_WAKE |
                   (__u64)1 << _UFFDIO_COPY |
                   (__u64)1 << _UFFDIO_ZEROPAGE;
    if ((reg_struct.ioctls & feature_mask) != feature_mask) {
        error_setg(errp, "Missing userfault map features: %" PRIx64,
                   (uint64_t)(~reg_struct.ioctls & feature_mask));
        goto out;
    }

    /* Success! */
    ret = true;
out:
    if (testarea) {
        munmap(testarea, pagesize);
    }
    if (ufd != -1) {
        close(ufd);
    }
    return ret;
}

/*
 * Setup an area of RAM so that it *can* be used for postcopy later; this
 * must be done right at the start prior to pre-copy.
 * opaque should be the MIS.
 */
static int init_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = qemu_ram_get_used_length(rb);
    trace_postcopy_init_range(block_name, host_addr, offset, length);

    /*
     * Save the used_length before running the guest. In case we have to
     * resize RAM blocks when syncing RAM block sizes from the source during
     * precopy, we'll update it manually via the ram block notifier.
     */
    rb->postcopy_length = length;

    /*
     * We need the whole of RAM to be truly empty for postcopy, so things
     * like ROMs and any data tables built during init must be zero'd
     * - we're going to get the copy from the source anyway.
     * (Precopy will just overwrite this data, so doesn't need the discard)
     */
    if (ram_discard_range(block_name, 0, length)) {
        return -1;
    }

    return 0;
}

/*
 * At the end of migration, undo the effects of init_range
 * opaque should be the MIS.
 */
static int cleanup_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    MigrationIncomingState *mis = opaque;
    struct uffdio_range range_struct;
    trace_postcopy_cleanup_range(block_name, host_addr, offset, length);

    /*
     * We turned off hugepage for the precopy stage with postcopy enabled;
     * we can turn it back on now.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_HUGEPAGE);

    /*
     * We can also turn off userfault now since we should have all the
     * pages. It can be useful to leave it on to debug postcopy
     * if you're not sure it's always getting every page.
     */
    range_struct.start = (uintptr_t)host_addr;
    range_struct.len = length;

    if (ioctl(mis->userfault_fd, UFFDIO_UNREGISTER, &range_struct)) {
        error_report("%s: userfault unregister %s", __func__, strerror(errno));

        return -1;
    }

    return 0;
}

/*
 * Initialise postcopy-ram, setting the RAM to a state where we can go into
 * postcopy later; must be called prior to any precopy.
 * Called from ram.c's similarly named ram_postcopy_incoming_init().
 */
int postcopy_ram_incoming_init(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(init_range, NULL)) {
        return -1;
    }

    return 0;
}

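/*
 * Unmap the temporary pages used for page placement: the per-channel temp
 * huge pages and the shared zero page, if they were allocated.
 */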
static void postcopy_temp_pages_cleanup(MigrationIncomingState *mis)
{
    int i;

    if (mis->postcopy_tmp_pages) {
        for (i = 0; i < mis->postcopy_channels; i++) {
            if (mis->postcopy_tmp_pages[i].tmp_huge_page) {
                munmap(mis->postcopy_tmp_pages[i].tmp_huge_page,
                       mis->largest_page_size);
                mis->postcopy_tmp_pages[i].tmp_huge_page = NULL;
            }
        }
        g_free(mis->postcopy_tmp_pages);
        mis->postcopy_tmp_pages = NULL;
    }

    if (mis->postcopy_tmp_zero_page) {
        munmap(mis->postcopy_tmp_zero_page, mis->largest_page_size);
        mis->postcopy_tmp_zero_page = NULL;
    }
}

/*
 * At the end of a migration where postcopy_ram_incoming_init was called.
 */
int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis)
{
    trace_postcopy_ram_incoming_cleanup_entry();

    if (mis->preempt_thread_status == PREEMPT_THREAD_CREATED) {
        /* Notify the fast load thread to quit */
        mis->preempt_thread_status = PREEMPT_THREAD_QUIT;
        if (mis->postcopy_qemufile_dst) {
            qemu_file_shutdown(mis->postcopy_qemufile_dst);
        }
        qemu_thread_join(&mis->postcopy_prio_thread);
        mis->preempt_thread_status = PREEMPT_THREAD_NONE;
    }

    if (mis->have_fault_thread) {
        Error *local_err = NULL;

        /* Let the fault thread quit */
        qatomic_set(&mis->fault_thread_quit, 1);
        postcopy_fault_thread_notify(mis);
        trace_postcopy_ram_incoming_cleanup_join();
        qemu_thread_join(&mis->fault_thread);

        if (postcopy_notify(POSTCOPY_NOTIFY_INBOUND_END, &local_err)) {
            error_report_err(local_err);
            return -1;
        }

        if (foreach_not_ignored_block(cleanup_range, mis)) {
            return -1;
        }

        trace_postcopy_ram_incoming_cleanup_closeuf();
        close(mis->userfault_fd);
        close(mis->userfault_event_fd);
        mis->have_fault_thread = false;
    }

    if (enable_mlock) {
        if (os_mlock() < 0) {
            error_report("mlock: %s", strerror(errno));
            /*
             * It doesn't feel right to fail at this point; we have a valid
             * VM state.
             */
        }
    }

    postcopy_temp_pages_cleanup(mis);

    trace_postcopy_ram_incoming_cleanup_blocktime(
            get_postcopy_total_blocktime());

    trace_postcopy_ram_incoming_cleanup_exit();
    return 0;
}

/*
 * Disable huge pages on an area
 */
static int nhp_range(RAMBlock *rb, void *opaque)
{
    const char *block_name = qemu_ram_get_idstr(rb);
    void *host_addr = qemu_ram_get_host_addr(rb);
    ram_addr_t offset = qemu_ram_get_offset(rb);
    ram_addr_t length = rb->postcopy_length;
    trace_postcopy_nhp_range(block_name, host_addr, offset, length);

    /*
     * Before we do discards we need to ensure those discards really
     * do delete areas of the page, even if THP thinks a hugepage would
     * be a good idea, so force hugepages off.
     */
    qemu_madvise(host_addr, length, QEMU_MADV_NOHUGEPAGE);

    return 0;
}

/*
 * Userfault requires us to mark RAM as NOHUGEPAGE prior to discard;
 * however, leaving it until after precopy means that most of the precopy
 * data is still THPd.
 */
int postcopy_ram_prepare_discard(MigrationIncomingState *mis)
{
    if (foreach_not_ignored_block(nhp_range, mis)) {
        return -1;
    }

    postcopy_state_set(POSTCOPY_INCOMING_DISCARD);

    return 0;
}

/*
 * Mark the given area of RAM as requiring notification of accesses to
 * unwritten areas.
 * Used as a callback on foreach_not_ignored_block.
 * rb: the RAMBlock to register
 * opaque: MigrationIncomingState pointer
 * Returns 0 on success
 */
static int ram_block_enable_notify(RAMBlock *rb, void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffdio_register reg_struct;

    reg_struct.range.start = (uintptr_t)qemu_ram_get_host_addr(rb);
    reg_struct.range.len = rb->postcopy_length;
    reg_struct.mode = UFFDIO_REGISTER_MODE_MISSING;

    /* Now tell our userfault_fd that it's responsible for this area */
    if (ioctl(mis->userfault_fd, UFFDIO_REGISTER, &reg_struct)) {
        error_report("%s userfault register: %s", __func__, strerror(errno));
        return -1;
    }
    if (!(reg_struct.ioctls & ((__u64)1 << _UFFDIO_COPY))) {
        error_report("%s userfault: Region doesn't support COPY", __func__);
        return -1;
    }
    if (reg_struct.ioctls & ((__u64)1 << _UFFDIO_ZEROPAGE)) {
        qemu_ram_set_uf_zeroable(rb);
    }

    return 0;
}

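/*
 * Wake any thread waiting on @client_addr in the process sharing @pcfd;
 * callers use this once the page covering that address is known to be
 * present.
 */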
int postcopy_wake_shared(struct PostCopyFD *pcfd,
                         uint64_t client_addr,
                         RAMBlock *rb)
{
    size_t pagesize = qemu_ram_pagesize(rb);
    struct uffdio_range range;
    int ret;
    trace_postcopy_wake_shared(client_addr, qemu_ram_get_idstr(rb));
    range.start = ROUND_DOWN(client_addr, pagesize);
    range.len = pagesize;
    ret = ioctl(pcfd->fd, UFFDIO_WAKE, &range);
    if (ret) {
        error_report("%s: Failed to wake: %zx in %s (%s)",
                     __func__, (size_t)client_addr, qemu_ram_get_idstr(rb),
                     strerror(errno));
    }
    return ret;
}

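/*
 * postcopy_request_page: ask the source for one host page, unless the page
 * was discarded via a RamDiscardManager, in which case a zero page is
 * placed locally (if it hasn't already been received).
 *
 * @rb: RAMBlock the fault lies in
 * @start: offset of the host page within @rb (host-page aligned)
 * @haddr: faulting host virtual address
 */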
static int postcopy_request_page(MigrationIncomingState *mis, RAMBlock *rb,
                                 ram_addr_t start, uint64_t haddr)
{
    void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));

    /*
     * Discarded pages (via RamDiscardManager) are never migrated. On unlikely
     * access, place a zeropage, which will also set the relevant bits in the
     * recv_bitmap accordingly, so we won't try placing a zeropage twice.
     *
     * Checking a single bit is sufficient to handle pagesize > TPS as either
     * all relevant bits are set or not.
     */
    assert(QEMU_IS_ALIGNED(start, qemu_ram_pagesize(rb)));
    if (ramblock_page_is_discarded(rb, start)) {
        bool received = ramblock_recv_bitmap_test_byte_offset(rb, start);

        return received ? 0 : postcopy_place_page_zero(mis, aligned, rb);
    }

    return migrate_send_rp_req_pages(mis, rb, start, haddr);
}

/*
 * Callback from shared fault handlers to ask for a page; the page must be
 * specified by a RAMBlock and an offset in that rb.
 * Note: Only for use by shared fault handlers (in fault thread)
 */
int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb,
                                 uint64_t client_addr, uint64_t rb_offset)
{
    uint64_t aligned_rbo = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb));
    MigrationIncomingState *mis = migration_incoming_get_current();

    trace_postcopy_request_shared_page(pcfd->idstr, qemu_ram_get_idstr(rb),
                                       rb_offset);
    if (ramblock_recv_bitmap_test_byte_offset(rb, aligned_rbo)) {
        trace_postcopy_request_shared_page_present(pcfd->idstr,
                                        qemu_ram_get_idstr(rb), rb_offset);
        return postcopy_wake_shared(pcfd, client_addr, rb);
    }
    postcopy_request_page(mis, rb, aligned_rbo, client_addr);
    return 0;
}

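/*
 * Map a faulting thread id to the index of the vCPU it belongs to,
 * or -1 if the fault did not come from a vCPU thread.
 */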
static int get_mem_fault_cpu_index(uint32_t pid)
{
    CPUState *cpu_iter;

    CPU_FOREACH(cpu_iter) {
        if (cpu_iter->thread_id == pid) {
            trace_get_mem_fault_cpu_index(cpu_iter->cpu_index, pid);
            return cpu_iter->cpu_index;
        }
    }
    trace_get_mem_fault_cpu_index(-1, pid);
    return -1;
}

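/*
 * Milliseconds since the blocktime context was created, clamped to a
 * minimum of 1 and truncated to 32 bits for the atomic uint32_t fields.
 */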
static uint32_t get_low_time_offset(PostcopyBlocktimeContext *dc)
{
    int64_t start_time_offset = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
                                    dc->start_time;
    return start_time_offset < 1 ? 1 : start_time_offset & UINT32_MAX;
}

/*
 * This function is called when a page fault occurs. It tracks
 * the blocking time of the vCPU that faulted.
 *
 * @addr: faulted host virtual address
 * @ptid: faulted process thread id
 * @rb: ramblock appropriate to addr
 */
static void mark_postcopy_blocktime_begin(uintptr_t addr, uint32_t ptid,
                                          RAMBlock *rb)
{
    int cpu, already_received;
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    uint32_t low_time_offset;

    if (!dc || ptid == 0) {
        return;
    }
    cpu = get_mem_fault_cpu_index(ptid);
    if (cpu < 0) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    if (dc->vcpu_addr[cpu] == 0) {
        qatomic_inc(&dc->smp_cpus_down);
    }

    qatomic_xchg(&dc->last_begin, low_time_offset);
    qatomic_xchg(&dc->page_fault_vcpu_time[cpu], low_time_offset);
    qatomic_xchg(&dc->vcpu_addr[cpu], addr);

    /*
     * Check it here, not at the beginning of the function, because the
     * check could occur earlier than bitmap_set in qemu_ufd_copy_ioctl.
     */
    already_received = ramblock_recv_bitmap_test(rb, (void *)addr);
    if (already_received) {
        qatomic_xchg(&dc->vcpu_addr[cpu], 0);
        qatomic_xchg(&dc->page_fault_vcpu_time[cpu], 0);
        qatomic_dec(&dc->smp_cpus_down);
    }
    trace_mark_postcopy_blocktime_begin(addr, dc, dc->page_fault_vcpu_time[cpu],
                                        cpu, already_received);
}

/*
 * This function just provides the calculated blocktime per cpu and traces it.
 * Total blocktime is calculated in mark_postcopy_blocktime_end.
 *
 *
 * Assume we have 3 CPUs
 *
 *      S1        E1           S1               E1
 * -----***********------------xxx***************------------------------> CPU1
 *
 *             S2                E2
 * ------------****************xxx---------------------------------------> CPU2
 *
 *                         S3            E3
 * ------------------------****xxx********-------------------------------> CPU3
 *
 * We have the sequence S1,S2,E1,S3,S1,E2,E3,E1
 * S2,E1 - doesn't match the condition, since the sequence S1,S2,E1 doesn't
 *         include CPU3
 * S3,S1,E2 - sequence includes all CPUs; in this case the overlap will be
 *            S1,E2 - it's a part of total blocktime.
 * S1 - here is last_begin
 * Legend of the picture is following:
 *              * - means blocktime per vCPU
 *              x - means overlapped blocktime (total blocktime)
 *
 * @addr: host virtual address
 */
static void mark_postcopy_blocktime_end(uintptr_t addr)
{
    MigrationIncomingState *mis = migration_incoming_get_current();
    PostcopyBlocktimeContext *dc = mis->blocktime_ctx;
    MachineState *ms = MACHINE(qdev_get_machine());
    unsigned int smp_cpus = ms->smp.cpus;
    int i, affected_cpu = 0;
    bool vcpu_total_blocktime = false;
    uint32_t read_vcpu_time, low_time_offset;

    if (!dc) {
        return;
    }

    low_time_offset = get_low_time_offset(dc);
    /*
     * Look up the vCPU in order to clear it. This algorithm looks
     * straightforward, but it's not optimal; a more optimal algorithm
     * would keep a tree or hash where the key is an address and the
     * value is a list of
     */
    for (i = 0; i < smp_cpus; i++) {
        uint32_t vcpu_blocktime = 0;

        read_vcpu_time = qatomic_fetch_add(&dc->page_fault_vcpu_time[i], 0);
        if (qatomic_fetch_add(&dc->vcpu_addr[i], 0) != addr ||
            read_vcpu_time == 0) {
            continue;
        }
        qatomic_xchg(&dc->vcpu_addr[i], 0);
        vcpu_blocktime = low_time_offset - read_vcpu_time;
        affected_cpu += 1;
        /*
         * We need to know whether mark_postcopy_blocktime_end was due to a
         * faulted page; another possible case is a prefetched page, and in
         * that case we shouldn't be here.
         */
        if (!vcpu_total_blocktime &&
            qatomic_fetch_add(&dc->smp_cpus_down, 0) == smp_cpus) {
            vcpu_total_blocktime = true;
        }
        /* continue the cycle, since one page could affect several vCPUs */
        dc->vcpu_blocktime[i] += vcpu_blocktime;
    }

    qatomic_sub(&dc->smp_cpus_down, affected_cpu);
    if (vcpu_total_blocktime) {
        dc->total_blocktime += low_time_offset - qatomic_fetch_add(
                &dc->last_begin, 0);
    }
    trace_mark_postcopy_blocktime_end(addr, dc, dc->total_blocktime,
                                      affected_cpu);
}

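/*
 * Park the fault thread on postcopy_pause_sem_fault; it is kicked again
 * once the migration channel has been re-established.
 */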
static void postcopy_pause_fault_thread(MigrationIncomingState *mis)
{
    trace_postcopy_pause_fault_thread();
    qemu_sem_wait(&mis->postcopy_pause_sem_fault);
    trace_postcopy_pause_fault_thread_continued();
}

/*
 * Handle faults detected by the USERFAULT markings
 */
static void *postcopy_ram_fault_thread(void *opaque)
{
    MigrationIncomingState *mis = opaque;
    struct uffd_msg msg;
    int ret;
    size_t index;
    RAMBlock *rb = NULL;

    trace_postcopy_ram_fault_thread_entry();
    rcu_register_thread();
    mis->last_rb = NULL; /* last RAMBlock we sent part of */
    qemu_sem_post(&mis->thread_sync_sem);

    struct pollfd *pfd;
    size_t pfd_len = 2 + mis->postcopy_remote_fds->len;

    pfd = g_new0(struct pollfd, pfd_len);

    pfd[0].fd = mis->userfault_fd;
    pfd[0].events = POLLIN;
    pfd[1].fd = mis->userfault_event_fd;
    pfd[1].events = POLLIN; /* Waiting for eventfd to go positive */
    trace_postcopy_ram_fault_thread_fds_core(pfd[0].fd, pfd[1].fd);
    for (index = 0; index < mis->postcopy_remote_fds->len; index++) {
        struct PostCopyFD *pcfd = &g_array_index(mis->postcopy_remote_fds,
                                                 struct PostCopyFD, index);
        pfd[2 + index].fd = pcfd->fd;
        pfd[2 + index].events = POLLIN;
        trace_postcopy_ram_fault_thread_fds_extra(2 + index, pcfd->idstr,
                                                  pcfd->fd);
    }

    while (true) {
        ram_addr_t rb_offset;
        int poll_result;

        /*
         * We're mainly waiting for the kernel to give us a faulting HVA;
         * however, we can be told to quit via userfault_event_fd, which is
         * an eventfd.
         */

        poll_result = poll(pfd, pfd_len, -1 /* Wait forever */);
        if (poll_result == -1) {
            error_report("%s: userfault poll: %s", __func__, strerror(errno));
            break;
        }

        if (!mis->to_src_file) {
            /*
             * Possibly someone has already told us via the event that the
             * return path is broken. We should hold until the channel is
             * rebuilt.
             */
            postcopy_pause_fault_thread(mis);
        }
| 999 | |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1000 | if (pfd[1].revents) { |
Peter Xu | 64f615f | 2018-02-08 18:31:06 +0800 | [diff] [blame] | 1001 | uint64_t tmp64 = 0; |
| 1002 | |
| 1003 | /* Consume the signal */ |
| 1004 | if (read(mis->userfault_event_fd, &tmp64, 8) != 8) { |
| 1005 | /* Nothing obviously nicer than posting this error. */ |
| 1006 | error_report("%s: read() failed", __func__); |
| 1007 | } |
| 1008 | |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 1009 | if (qatomic_read(&mis->fault_thread_quit)) { |
Peter Xu | 64f615f | 2018-02-08 18:31:06 +0800 | [diff] [blame] | 1010 | trace_postcopy_ram_fault_thread_quit(); |
| 1011 | break; |
| 1012 | } |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1013 | } |
| 1014 | |
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1015 | if (pfd[0].revents) { |
| 1016 | poll_result--; |
| 1017 | ret = read(mis->userfault_fd, &msg, sizeof(msg)); |
| 1018 | if (ret != sizeof(msg)) { |
| 1019 | if (errno == EAGAIN) { |
| 1020 | /* |
| 1021 | * if a wake up happens on the other thread just after |
| 1022 | * the poll, there is nothing to read. |
| 1023 | */ |
| 1024 | continue; |
| 1025 | } |
| 1026 | if (ret < 0) { |
| 1027 | error_report("%s: Failed to read full userfault " |
| 1028 | "message: %s", |
| 1029 | __func__, strerror(errno)); |
| 1030 | break; |
| 1031 | } else { |
| 1032 | error_report("%s: Read %d bytes from userfaultfd " |
| 1033 | "expected %zd", |
| 1034 | __func__, ret, sizeof(msg)); |
| 1035 | break; /* Lost alignment, don't know what we'd read next */ |
| 1036 | } |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1037 | } |
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1038 | if (msg.event != UFFD_EVENT_PAGEFAULT) { |
| 1039 | error_report("%s: Read unexpected event %u from userfaultfd", |
| 1040 | __func__, msg.event); |
| 1041 | continue; /* It's not a page fault, shouldn't happen */ |
| 1042 | } |
| 1043 | |
| 1044 | rb = qemu_ram_block_from_host( |
| 1045 | (void *)(uintptr_t)msg.arg.pagefault.address, |
| 1046 | true, &rb_offset); |
| 1047 | if (!rb) { |
| 1048 | error_report("postcopy_ram_fault_thread: Fault outside guest: %" |
| 1049 | PRIx64, (uint64_t)msg.arg.pagefault.address); |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1050 | break; |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1051 | } |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1052 | |
David Hildenbrand | 7648297 | 2021-10-11 19:53:44 +0200 | [diff] [blame] | 1053 | rb_offset = ROUND_DOWN(rb_offset, qemu_ram_pagesize(rb)); |
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1054 | trace_postcopy_ram_fault_thread_request(msg.arg.pagefault.address, |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1055 | qemu_ram_get_idstr(rb), |
Alexey Perevalov | 575b0b3 | 2018-03-22 21:17:24 +0300 | [diff] [blame] | 1056 | rb_offset, |
| 1057 | msg.arg.pagefault.feat.ptid); |
| 1058 | mark_postcopy_blocktime_begin( |
| 1059 | (uintptr_t)(msg.arg.pagefault.address), |
| 1060 | msg.arg.pagefault.feat.ptid, rb); |
| 1061 | |
Peter Xu | 3a7804c | 2018-05-02 18:47:22 +0800 | [diff] [blame] | 1062 | retry: |
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1063 | /* |
| 1064 | * Send the request to the source - we want to request one |
| 1065 | * of our host page sizes (which is >= the target page size) |
| 1066 | */ |
David Hildenbrand | 9470c5e | 2021-10-11 19:53:43 +0200 | [diff] [blame] | 1067 | ret = postcopy_request_page(mis, rb, rb_offset, |
| 1068 | msg.arg.pagefault.address); |
Peter Xu | 3a7804c | 2018-05-02 18:47:22 +0800 | [diff] [blame] | 1069 | if (ret) { |
| 1070 | /* May be network failure, try to wait for recovery */ |
Peter Xu | 27dd21b | 2022-03-01 16:39:11 +0800 | [diff] [blame] | 1071 | postcopy_pause_fault_thread(mis); |
| 1072 | goto retry; |
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1073 | } |
| 1074 | } |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1075 | |
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1076 | /* Now handle any requests from external processes on shared memory */ |
| 1077 | /* TODO: May need to handle devices deregistering during postcopy */ |
| 1078 | for (index = 2; index < pfd_len && poll_result; index++) { |
| 1079 | if (pfd[index].revents) { |
| 1080 | struct PostCopyFD *pcfd = |
| 1081 | &g_array_index(mis->postcopy_remote_fds, |
| 1082 | struct PostCopyFD, index - 2); |
| 1083 | |
| 1084 | poll_result--; |
| 1085 | if (pfd[index].revents & POLLERR) { |
| 1086 | error_report("%s: POLLERR on poll %zd fd=%d", |
| 1087 | __func__, index, pcfd->fd); |
| 1088 | pfd[index].events = 0; |
| 1089 | continue; |
| 1090 | } |
| 1091 | |
| 1092 | ret = read(pcfd->fd, &msg, sizeof(msg)); |
| 1093 | if (ret != sizeof(msg)) { |
| 1094 | if (errno == EAGAIN) { |
| 1095 | /* |
| 1096 | * If a wake-up happens on the other thread just after |
| 1097 | * the poll, there is nothing to read. |
| 1098 | */ |
| 1099 | continue; |
| 1100 | } |
| 1101 | if (ret < 0) { |
| 1102 | error_report("%s: Failed to read full userfault " |
| 1103 | "message: %s (shared) revents=%d", |
| 1104 | __func__, strerror(errno), |
| 1105 | pfd[index].revents); |
| 1106 | /* TODO: Could just disable this sharer */ |
| 1107 | break; |
| 1108 | } else { |
| 1109 | error_report("%s: Read %d bytes from userfaultfd " |
| 1110 | "expected %zu (shared)", |
| 1111 | __func__, ret, sizeof(msg)); |
| 1112 | /* TODO: Could just disable this sharer */ |
| 1113 | break; /* Lost alignment, don't know what we'd read next */ |
| 1114 | } |
| 1115 | } |
| 1116 | if (msg.event != UFFD_EVENT_PAGEFAULT) { |
| 1117 | error_report("%s: Read unexpected event %u " |
| 1118 | "from userfaultfd (shared)", |
| 1119 | __func__, msg.event); |
| 1120 | continue; /* It's not a page fault, shouldn't happen */ |
| 1121 | } |
| 1122 | /* Call the device handler registered with us */ |
| 1123 | ret = pcfd->handler(pcfd, &msg); |
| 1124 | if (ret) { |
| 1125 | error_report("%s: Failed to resolve shared fault on %zd/%s", |
| 1126 | __func__, index, pcfd->idstr); |
| 1127 | /* TODO: Fail? Disable this sharer? */ |
| 1128 | } |
| 1129 | } |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1130 | } |
| 1131 | } |
Lidong Chen | 74637e6 | 2018-08-06 21:29:29 +0800 | [diff] [blame] | 1132 | rcu_unregister_thread(); |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1133 | trace_postcopy_ram_fault_thread_exit(); |
Marc-André Lureau | fc6008f | 2018-03-21 12:36:44 +0100 | [diff] [blame] | 1134 | g_free(pfd); |
Dr. David Alan Gilbert | f0a227a | 2015-11-05 18:11:04 +0000 | [diff] [blame] | 1135 | return NULL; |
| 1136 | } |
| 1137 | |
Peter Xu | 476ebf7 | 2022-01-19 16:09:20 +0800 | [diff] [blame] | 1138 | static int postcopy_temp_pages_setup(MigrationIncomingState *mis) |
| 1139 | { |
Peter Xu | 77dadc3 | 2022-03-01 16:39:04 +0800 | [diff] [blame] | 1140 | PostcopyTmpPage *tmp_page; |
| 1141 | int err, i, channels; |
| 1142 | void *temp_page; |
Peter Xu | 476ebf7 | 2022-01-19 16:09:20 +0800 | [diff] [blame] | 1143 | |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1144 | if (migrate_postcopy_preempt()) { |
| 1145 | /* If preemption enabled, need extra channel for urgent requests */ |
| 1146 | mis->postcopy_channels = RAM_CHANNEL_MAX; |
| 1147 | } else { |
| 1148 | /* Both precopy/postcopy on the same channel */ |
| 1149 | mis->postcopy_channels = 1; |
| 1150 | } |
Peter Xu | 77dadc3 | 2022-03-01 16:39:04 +0800 | [diff] [blame] | 1151 | |
| 1152 | channels = mis->postcopy_channels; |
| 1153 | mis->postcopy_tmp_pages = g_malloc0_n(channels, sizeof(PostcopyTmpPage)); |
| 1154 | |
| 1155 | for (i = 0; i < channels; i++) { |
| 1156 | tmp_page = &mis->postcopy_tmp_pages[i]; |
| 1157 | temp_page = mmap(NULL, mis->largest_page_size, PROT_READ | PROT_WRITE, |
| 1158 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
| 1159 | if (temp_page == MAP_FAILED) { |
| 1160 | err = errno; |
| 1161 | error_report("%s: Failed to map postcopy_tmp_pages[%d]: %s", |
| 1162 | __func__, i, strerror(err)); |
| 1163 | /* Clean up will be done later */ |
| 1164 | return -err; |
| 1165 | } |
| 1166 | tmp_page->tmp_huge_page = temp_page; |
| 1167 | /* Initialize default states for each tmp page */ |
| 1168 | postcopy_temp_page_reset(tmp_page); |
Peter Xu | 476ebf7 | 2022-01-19 16:09:20 +0800 | [diff] [blame] | 1169 | } |
| 1170 | |
| 1171 | /* |
| 1172 | * Map large zero page when kernel can't use UFFDIO_ZEROPAGE for hugepages |
| 1173 | */ |
| 1174 | mis->postcopy_tmp_zero_page = mmap(NULL, mis->largest_page_size, |
| 1175 | PROT_READ | PROT_WRITE, |
| 1176 | MAP_PRIVATE | MAP_ANONYMOUS, -1, 0); |
| 1177 | if (mis->postcopy_tmp_zero_page == MAP_FAILED) { |
| 1178 | err = errno; |
| 1179 | mis->postcopy_tmp_zero_page = NULL; |
| 1180 | error_report("%s: Failed to map large zero page %s", |
| 1181 | __func__, strerror(err)); |
| 1182 | return -err; |
| 1183 | } |
| 1184 | |
| 1185 | memset(mis->postcopy_tmp_zero_page, '\0', mis->largest_page_size); |
| 1186 | |
| 1187 | return 0; |
| 1188 | } |
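| | |
| | /* |
| | * Note: each channel assembles incoming target pages into its own |
| | * tmp_huge_page and, once a whole host page has been gathered, places |
| | * it atomically with postcopy_place_page() below. |
| | */ |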
| 1189 | |
Wei Yang | 2a7eb14 | 2019-10-10 09:13:15 +0800 | [diff] [blame] | 1190 | int postcopy_ram_incoming_setup(MigrationIncomingState *mis) |
Dr. David Alan Gilbert | f0a227a | 2015-11-05 18:11:04 +0000 | [diff] [blame] | 1191 | { |
Peter Xu | 74c38cf | 2023-04-25 21:15:14 -0400 | [diff] [blame] | 1192 | Error *local_err = NULL; |
| 1193 | |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1194 | /* Open the fd for the kernel to give us userfaults */ |
Peter Xu | d5890ea | 2023-02-01 16:10:54 -0500 | [diff] [blame] | 1195 | mis->userfault_fd = uffd_open(O_CLOEXEC | O_NONBLOCK); |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1196 | if (mis->userfault_fd == -1) { |
| 1197 | error_report("%s: Failed to open userfault fd: %s", __func__, |
| 1198 | strerror(errno)); |
| 1199 | return -1; |
| 1200 | } |
| 1201 | |
| 1202 | /* |
| 1203 | * Although the host check already tested the API, we need to |
| 1204 | * do the check again as an ABI handshake on the new fd. |
| 1205 | */ |
Peter Xu | 74c38cf | 2023-04-25 21:15:14 -0400 | [diff] [blame] | 1206 | if (!ufd_check_and_apply(mis->userfault_fd, mis, &local_err)) { |
| 1207 | error_report_err(local_err); |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1208 | return -1; |
| 1209 | } |
| 1210 | |
| 1211 | /* Now an eventfd we use to tell the fault-thread to quit */ |
Peter Xu | 64f615f | 2018-02-08 18:31:06 +0800 | [diff] [blame] | 1212 | mis->userfault_event_fd = eventfd(0, EFD_CLOEXEC); |
| 1213 | if (mis->userfault_event_fd == -1) { |
| 1214 | error_report("%s: Opening userfault_event_fd: %s", __func__, |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1215 | strerror(errno)); |
| 1216 | close(mis->userfault_fd); |
| 1217 | return -1; |
| 1218 | } |
| 1219 | |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1220 | postcopy_thread_create(mis, &mis->fault_thread, "fault-default", |
Peter Xu | 095c12a | 2022-03-01 16:39:06 +0800 | [diff] [blame] | 1221 | postcopy_ram_fault_thread, QEMU_THREAD_JOINABLE); |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1222 | mis->have_fault_thread = true; |
Dr. David Alan Gilbert | f0a227a | 2015-11-05 18:11:04 +0000 | [diff] [blame] | 1223 | |
| 1224 | /* Mark so that we get notified of accesses to unwritten areas */ |
Yury Kotov | fbd162e | 2019-02-15 20:45:46 +0300 | [diff] [blame] | 1225 | if (foreach_not_ignored_block(ram_block_enable_notify, mis)) { |
Fei Li | 91b02dc | 2019-01-13 22:08:48 +0800 | [diff] [blame] | 1226 | error_report("ram_block_enable_notify failed"); |
Dr. David Alan Gilbert | f0a227a | 2015-11-05 18:11:04 +0000 | [diff] [blame] | 1227 | return -1; |
| 1228 | } |
| 1229 | |
Peter Xu | 476ebf7 | 2022-01-19 16:09:20 +0800 | [diff] [blame] | 1230 | if (postcopy_temp_pages_setup(mis)) { |
| 1231 | /* Error dumped in the sub-function */ |
Wei Yang | 3414322 | 2019-10-05 21:50:20 +0800 | [diff] [blame] | 1232 | return -1; |
| 1233 | } |
| 1234 | |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1235 | if (migrate_postcopy_preempt()) { |
| 1236 | /* |
| 1237 | * This thread needs to be created after the temp pages because |
| 1238 | * it'll fetch the RAM_CHANNEL_POSTCOPY PostcopyTmpPage immediately. |
| 1239 | */ |
| 1240 | postcopy_thread_create(mis, &mis->postcopy_prio_thread, "fault-fast", |
| 1241 | postcopy_preempt_thread, QEMU_THREAD_JOINABLE); |
Peter Xu | 6621883 | 2023-03-26 13:25:39 -0400 | [diff] [blame] | 1242 | mis->preempt_thread_status = PREEMPT_THREAD_CREATED; |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1243 | } |
| 1244 | |
Dr. David Alan Gilbert | c4faeed | 2015-11-05 18:11:17 +0000 | [diff] [blame] | 1245 | trace_postcopy_ram_enable_notify(); |
| 1246 | |
Dr. David Alan Gilbert | f0a227a | 2015-11-05 18:11:04 +0000 | [diff] [blame] | 1247 | return 0; |
| 1248 | } |
| 1249 | |
Peter Xu | eef621c | 2020-10-21 17:27:16 -0400 | [diff] [blame] | 1250 | static int qemu_ufd_copy_ioctl(MigrationIncomingState *mis, void *host_addr, |
Alexey Perevalov | f949461 | 2017-10-05 14:13:20 +0300 | [diff] [blame] | 1251 | void *from_addr, uint64_t pagesize, RAMBlock *rb) |
Alexey Perevalov | 727b9d7 | 2017-10-05 14:13:19 +0300 | [diff] [blame] | 1252 | { |
Peter Xu | eef621c | 2020-10-21 17:27:16 -0400 | [diff] [blame] | 1253 | int userfault_fd = mis->userfault_fd; |
Alexey Perevalov | f949461 | 2017-10-05 14:13:20 +0300 | [diff] [blame] | 1254 | int ret; |
Peter Xu | eef621c | 2020-10-21 17:27:16 -0400 | [diff] [blame] | 1255 | |
Alexey Perevalov | 727b9d7 | 2017-10-05 14:13:19 +0300 | [diff] [blame] | 1256 | if (from_addr) { |
| 1257 | struct uffdio_copy copy_struct; |
| 1258 | copy_struct.dst = (uint64_t)(uintptr_t)host_addr; |
| 1259 | copy_struct.src = (uint64_t)(uintptr_t)from_addr; |
| 1260 | copy_struct.len = pagesize; |
| 1261 | copy_struct.mode = 0; |
Alexey Perevalov | f949461 | 2017-10-05 14:13:20 +0300 | [diff] [blame] | 1262 | ret = ioctl(userfault_fd, UFFDIO_COPY, ©_struct); |
Alexey Perevalov | 727b9d7 | 2017-10-05 14:13:19 +0300 | [diff] [blame] | 1263 | } else { |
| 1264 | struct uffdio_zeropage zero_struct; |
| 1265 | zero_struct.range.start = (uint64_t)(uintptr_t)host_addr; |
| 1266 | zero_struct.range.len = pagesize; |
| 1267 | zero_struct.mode = 0; |
Alexey Perevalov | f949461 | 2017-10-05 14:13:20 +0300 | [diff] [blame] | 1268 | ret = ioctl(userfault_fd, UFFDIO_ZEROPAGE, &zero_struct); |
Alexey Perevalov | 727b9d7 | 2017-10-05 14:13:19 +0300 | [diff] [blame] | 1269 | } |
Alexey Perevalov | f949461 | 2017-10-05 14:13:20 +0300 | [diff] [blame] | 1270 | if (!ret) { |
Peter Xu | 8f8bfff | 2020-10-21 17:27:18 -0400 | [diff] [blame] | 1271 | qemu_mutex_lock(&mis->page_request_mutex); |
Alexey Perevalov | f949461 | 2017-10-05 14:13:20 +0300 | [diff] [blame] | 1272 | ramblock_recv_bitmap_set_range(rb, host_addr, |
| 1273 | pagesize / qemu_target_page_size()); |
Peter Xu | 8f8bfff | 2020-10-21 17:27:18 -0400 | [diff] [blame] | 1274 | /* |
| 1275 | * If this page resolves a page fault for a previously recorded faulted |
| 1276 | * address, take a special note to maintain the requested page list. |
| 1277 | */ |
| 1278 | if (g_tree_lookup(mis->page_requested, host_addr)) { |
| 1279 | g_tree_remove(mis->page_requested, host_addr); |
| 1280 | mis->page_requested_count--; |
| 1281 | trace_postcopy_page_req_del(host_addr, mis->page_requested_count); |
| 1282 | } |
| 1283 | qemu_mutex_unlock(&mis->page_request_mutex); |
Alexey Perevalov | 575b0b3 | 2018-03-22 21:17:24 +0300 | [diff] [blame] | 1284 | mark_postcopy_blocktime_end((uintptr_t)host_addr); |
Alexey Perevalov | f949461 | 2017-10-05 14:13:20 +0300 | [diff] [blame] | 1285 | } |
| 1286 | return ret; |
Alexey Perevalov | 727b9d7 | 2017-10-05 14:13:19 +0300 | [diff] [blame] | 1287 | } |
| 1288 | |
Dr. David Alan Gilbert | d488b34 | 2018-03-12 17:21:15 +0000 | [diff] [blame] | 1289 | int postcopy_notify_shared_wake(RAMBlock *rb, uint64_t offset) |
| 1290 | { |
| 1291 | int i; |
| 1292 | MigrationIncomingState *mis = migration_incoming_get_current(); |
| 1293 | GArray *pcrfds = mis->postcopy_remote_fds; |
| 1294 | |
| 1295 | for (i = 0; i < pcrfds->len; i++) { |
| 1296 | struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i); |
| 1297 | int ret = cur->waker(cur, rb, offset); |
| 1298 | if (ret) { |
| 1299 | return ret; |
| 1300 | } |
| 1301 | } |
| 1302 | return 0; |
| 1303 | } |
| 1304 | |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1305 | /* |
| 1306 | * Place a host page (from) at (host) atomically. |
| 1307 | * Returns 0 on success. |
| 1308 | */ |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1309 | int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, |
Alexey Perevalov | 8be4620 | 2017-10-05 14:13:18 +0300 | [diff] [blame] | 1310 | RAMBlock *rb) |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1311 | { |
Alexey Perevalov | 8be4620 | 2017-10-05 14:13:18 +0300 | [diff] [blame] | 1312 | size_t pagesize = qemu_ram_pagesize(rb); |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1313 | |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1314 | /* The copy also acks to the kernel, waking the stalled thread up. |
| 1315 | * TODO: We can inhibit that ack and only do it if it was requested, |
| 1316 | * which would be slightly cheaper, but we'd have to be careful |
| 1317 | * of the order of updating our page state. |
| 1318 | */ |
Peter Xu | eef621c | 2020-10-21 17:27:16 -0400 | [diff] [blame] | 1319 | if (qemu_ufd_copy_ioctl(mis, host, from, pagesize, rb)) { |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1320 | int e = errno; |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1321 | error_report("%s: %s copy host: %p from: %p (size: %zd)", |
| 1322 | __func__, strerror(e), host, from, pagesize); |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1323 | |
| 1324 | return -e; |
| 1325 | } |
| 1326 | |
| 1327 | trace_postcopy_place_page(host); |
Dr. David Alan Gilbert | dedfb4b | 2018-03-12 17:21:17 +0000 | [diff] [blame] | 1328 | return postcopy_notify_shared_wake(rb, |
| 1329 | qemu_ram_block_host_offset(rb, host)); |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1330 | } |
| 1331 | |
| 1332 | /* |
| 1333 | * Place a zero page at (host) atomically. |
| 1334 | * Returns 0 on success. |
| 1335 | */ |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1336 | int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, |
Alexey Perevalov | 8be4620 | 2017-10-05 14:13:18 +0300 | [diff] [blame] | 1337 | RAMBlock *rb) |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1338 | { |
Dr. David Alan Gilbert | 2ce1664 | 2018-03-12 17:20:58 +0000 | [diff] [blame] | 1339 | size_t pagesize = qemu_ram_pagesize(rb); |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1340 | trace_postcopy_place_page_zero(host); |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1341 | |
Dr. David Alan Gilbert | 2ce1664 | 2018-03-12 17:20:58 +0000 | [diff] [blame] | 1342 | /* Normal RAMBlocks can zero a page using UFFDIO_ZEROPAGE, |
| 1343 | * but it's not available for everything (e.g. hugetlb pages). |
| 1344 | */ |
| 1345 | if (qemu_ram_is_uf_zeroable(rb)) { |
Peter Xu | eef621c | 2020-10-21 17:27:16 -0400 | [diff] [blame] | 1346 | if (qemu_ufd_copy_ioctl(mis, host, NULL, pagesize, rb)) { |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1347 | int e = errno; |
| 1348 | error_report("%s: %s zero host: %p", |
| 1349 | __func__, strerror(e), host); |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1350 | |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1351 | return -e; |
| 1352 | } |
Dr. David Alan Gilbert | dedfb4b | 2018-03-12 17:21:17 +0000 | [diff] [blame] | 1353 | return postcopy_notify_shared_wake(rb, |
| 1354 | qemu_ram_block_host_offset(rb, |
| 1355 | host)); |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1356 | } else { |
Wei Yang | 6629890 | 2019-10-05 21:50:21 +0800 | [diff] [blame] | 1357 | return postcopy_place_page(mis, host, mis->postcopy_tmp_zero_page, rb); |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1358 | } |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1359 | } |
| 1360 | |
Dr. David Alan Gilbert | eb59db5 | 2015-11-05 18:10:55 +0000 | [diff] [blame] | 1361 | #else |
| 1362 | /* No target OS support, stubs just fail */ |
Alexey Perevalov | 65ace06 | 2018-03-22 21:17:27 +0300 | [diff] [blame] | 1363 | void fill_destination_postcopy_migration_info(MigrationInfo *info) |
| 1364 | { |
| 1365 | } |
| 1366 | |
Peter Xu | 74c38cf | 2023-04-25 21:15:14 -0400 | [diff] [blame] | 1367 | bool postcopy_ram_supported_by_host(MigrationIncomingState *mis, Error **errp) |
Dr. David Alan Gilbert | eb59db5 | 2015-11-05 18:10:55 +0000 | [diff] [blame] | 1368 | { |
| 1369 | error_report("%s: No OS support", __func__); |
| 1370 | return false; |
| 1371 | } |
| 1372 | |
David Hildenbrand | c136180 | 2018-06-20 22:27:36 +0200 | [diff] [blame] | 1373 | int postcopy_ram_incoming_init(MigrationIncomingState *mis) |
Dr. David Alan Gilbert | 1caddf8 | 2015-11-05 18:11:03 +0000 | [diff] [blame] | 1374 | { |
| 1375 | error_report("postcopy_ram_incoming_init: No OS support"); |
| 1376 | return -1; |
| 1377 | } |
| 1378 | |
| 1379 | int postcopy_ram_incoming_cleanup(MigrationIncomingState *mis) |
| 1380 | { |
| 1381 | assert(0); |
| 1382 | return -1; |
| 1383 | } |
| 1384 | |
Dr. David Alan Gilbert | f952710 | 2015-11-05 18:11:20 +0000 | [diff] [blame] | 1385 | int postcopy_ram_prepare_discard(MigrationIncomingState *mis) |
| 1386 | { |
| 1387 | assert(0); |
| 1388 | return -1; |
| 1389 | } |
| 1390 | |
Michael S. Tsirkin | c188c53 | 2018-03-20 16:26:10 +0200 | [diff] [blame] | 1391 | int postcopy_request_shared_page(struct PostCopyFD *pcfd, RAMBlock *rb, |
| 1392 | uint64_t client_addr, uint64_t rb_offset) |
| 1393 | { |
| 1394 | assert(0); |
| 1395 | return -1; |
| 1396 | } |
| 1397 | |
Wei Yang | 2a7eb14 | 2019-10-10 09:13:15 +0800 | [diff] [blame] | 1398 | int postcopy_ram_incoming_setup(MigrationIncomingState *mis) |
Dr. David Alan Gilbert | f0a227a | 2015-11-05 18:11:04 +0000 | [diff] [blame] | 1399 | { |
| 1400 | assert(0); |
| 1401 | return -1; |
| 1402 | } |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1403 | |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1404 | int postcopy_place_page(MigrationIncomingState *mis, void *host, void *from, |
Alexey Perevalov | 8be4620 | 2017-10-05 14:13:18 +0300 | [diff] [blame] | 1405 | RAMBlock *rb) |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1406 | { |
| 1407 | assert(0); |
| 1408 | return -1; |
| 1409 | } |
| 1410 | |
Dr. David Alan Gilbert | df9ff5e | 2017-02-24 18:28:35 +0000 | [diff] [blame] | 1411 | int postcopy_place_page_zero(MigrationIncomingState *mis, void *host, |
Alexey Perevalov | 8be4620 | 2017-10-05 14:13:18 +0300 | [diff] [blame] | 1412 | RAMBlock *rb) |
Dr. David Alan Gilbert | 696ed9a | 2015-11-05 18:11:10 +0000 | [diff] [blame] | 1413 | { |
| 1414 | assert(0); |
| 1415 | return -1; |
| 1416 | } |
| 1417 | |
Dr. David Alan Gilbert | 5efc356 | 2018-03-12 17:21:14 +0000 | [diff] [blame] | 1418 | int postcopy_wake_shared(struct PostCopyFD *pcfd, |
| 1419 | uint64_t client_addr, |
| 1420 | RAMBlock *rb) |
| 1421 | { |
| 1422 | assert(0); |
| 1423 | return -1; |
| 1424 | } |
Dr. David Alan Gilbert | eb59db5 | 2015-11-05 18:10:55 +0000 | [diff] [blame] | 1425 | #endif |
| 1426 | |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1427 | /* ------------------------------------------------------------------------- */ |
Peter Xu | 77dadc3 | 2022-03-01 16:39:04 +0800 | [diff] [blame] | 1428 | void postcopy_temp_page_reset(PostcopyTmpPage *tmp_page) |
| 1429 | { |
| 1430 | tmp_page->target_pages = 0; |
| 1431 | tmp_page->host_addr = NULL; |
| 1432 | /* |
| 1433 | * This is set to true on reset, and cleared as soon as we receive |
| 1434 | * any non-zero small page within this huge page. |
| 1435 | */ |
| 1436 | tmp_page->all_zero = true; |
| 1437 | } |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1438 | |
Peter Xu | 9ab7ef9 | 2018-02-08 18:31:07 +0800 | [diff] [blame] | 1439 | void postcopy_fault_thread_notify(MigrationIncomingState *mis) |
| 1440 | { |
| 1441 | uint64_t tmp64 = 1; |
| 1442 | |
| 1443 | /* |
| 1444 | * Wake up the fault_thread. It's an eventfd that should currently |
| 1445 | * be at 0; we're going to increment it to 1. |
| 1446 | */ |
| 1447 | if (write(mis->userfault_event_fd, &tmp64, 8) != 8) { |
| 1448 | /* Not much we can do here, but may as well report it */ |
| 1449 | error_report("%s: incrementing failed: %s", __func__, |
| 1450 | strerror(errno)); |
| 1451 | } |
| 1452 | } |
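| | |
| | /* |
| | * Illustrative shutdown sequence pairing with the fault thread's quit |
| | * check above (a sketch only; the real caller is the incoming cleanup |
| | * path): |
| | * |
| | *   qatomic_set(&mis->fault_thread_quit, 1); |
| | *   postcopy_fault_thread_notify(mis); |
| | *   qemu_thread_join(&mis->fault_thread); |
| | */ |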
| 1453 | |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1454 | /** |
| 1455 | * postcopy_discard_send_init: Called at the start of each RAMBlock before |
| 1456 | * asking to discard individual ranges. |
| 1457 | * |
| 1458 | * @ms: The current migration state. |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1460 | * @name: RAMBlock that discards will operate on. |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1461 | */ |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1462 | static PostcopyDiscardState pds = {0}; |
| 1463 | void postcopy_discard_send_init(MigrationState *ms, const char *name) |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1464 | { |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1465 | pds.ramblock_name = name; |
| 1466 | pds.cur_entry = 0; |
| 1467 | pds.nsentwords = 0; |
| 1468 | pds.nsentcmds = 0; |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1469 | } |
| 1470 | |
| 1471 | /** |
| 1472 | * postcopy_discard_send_range: Called by the bitmap code for each chunk to |
| 1473 | * discard. May send a discard message, may just leave it queued to |
| 1474 | * be sent later. |
| 1475 | * |
| 1476 | * @ms: Current migration state. |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1477 | * @start,@length: a range of pages in the migration bitmap in the |
| 1478 | * RAM block passed to postcopy_discard_send_init() (length=1 is one page) |
| 1479 | */ |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1480 | void postcopy_discard_send_range(MigrationState *ms, unsigned long start, |
| 1481 | unsigned long length) |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1482 | { |
Juan Quintela | 20afaed | 2017-03-21 09:09:14 +0100 | [diff] [blame] | 1483 | size_t tp_size = qemu_target_page_size(); |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1484 | /* Convert to byte offsets within the RAM block */ |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1485 | pds.start_list[pds.cur_entry] = start * tp_size; |
| 1486 | pds.length_list[pds.cur_entry] = length * tp_size; |
| 1487 | trace_postcopy_discard_send_range(pds.ramblock_name, start, length); |
| 1488 | pds.cur_entry++; |
| 1489 | pds.nsentwords++; |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1490 | |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1491 | if (pds.cur_entry == MAX_DISCARDS_PER_COMMAND) { |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1492 | /* Full set, ship it! */ |
zhanghailiang | 89a02a9 | 2016-01-15 11:37:42 +0800 | [diff] [blame] | 1493 | qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file, |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1494 | pds.ramblock_name, |
| 1495 | pds.cur_entry, |
| 1496 | pds.start_list, |
| 1497 | pds.length_list); |
| 1498 | pds.nsentcmds++; |
| 1499 | pds.cur_entry = 0; |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1500 | } |
| 1501 | } |
| 1502 | |
| 1503 | /** |
| 1504 | * postcopy_discard_send_finish: Called at the end of each RAMBlock by the |
| 1505 | * bitmap code. Sends any outstanding discard messages. |
| 1506 | * |
| 1507 | * @ms: Current migration state. |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1508 | */ |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1509 | void postcopy_discard_send_finish(MigrationState *ms) |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1510 | { |
| 1511 | /* Anything unsent? */ |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1512 | if (pds.cur_entry) { |
zhanghailiang | 89a02a9 | 2016-01-15 11:37:42 +0800 | [diff] [blame] | 1513 | qemu_savevm_send_postcopy_ram_discard(ms->to_dst_file, |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1514 | pds.ramblock_name, |
| 1515 | pds.cur_entry, |
| 1516 | pds.start_list, |
| 1517 | pds.length_list); |
| 1518 | pds.nsentcmds++; |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1519 | } |
| 1520 | |
Wei Yang | 810cf2b | 2019-07-24 09:07:21 +0800 | [diff] [blame] | 1521 | trace_postcopy_discard_send_finish(pds.ramblock_name, pds.nsentwords, |
| 1522 | pds.nsentcmds); |
Dr. David Alan Gilbert | e0b266f | 2015-11-05 18:11:02 +0000 | [diff] [blame] | 1523 | } |
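| | |
| | /* |
| | * Typical per-RAMBlock caller sequence (a sketch only; the real caller |
| | * lives in the ram.c dirty-bitmap code): |
| | * |
| | *   postcopy_discard_send_init(ms, block->idstr); |
| | *   ... for each run of target pages to discard ... |
| | *       postcopy_discard_send_range(ms, start, length); |
| | *   postcopy_discard_send_finish(ms); |
| | */ |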
Juan Quintela | bac3b21 | 2017-04-24 16:50:35 +0200 | [diff] [blame] | 1524 | |
| 1525 | /* |
| 1526 | * Current state of incoming postcopy; note this is not part of |
| 1527 | * MigrationIncomingState since its state is used during cleanup |
| 1528 | * at the end, as the MIS is being freed. |
| 1529 | */ |
| 1530 | static PostcopyState incoming_postcopy_state; |
| 1531 | |
| 1532 | PostcopyState postcopy_state_get(void) |
| 1533 | { |
Paolo Bonzini | 4592eaf | 2023-03-03 11:15:28 +0100 | [diff] [blame] | 1534 | return qatomic_load_acquire(&incoming_postcopy_state); |
Juan Quintela | bac3b21 | 2017-04-24 16:50:35 +0200 | [diff] [blame] | 1535 | } |
| 1536 | |
| 1537 | /* Set the state and return the old state */ |
| 1538 | PostcopyState postcopy_state_set(PostcopyState new_state) |
| 1539 | { |
Stefan Hajnoczi | d73415a | 2020-09-23 11:56:46 +0100 | [diff] [blame] | 1540 | return qatomic_xchg(&incoming_postcopy_state, new_state); |
Juan Quintela | bac3b21 | 2017-04-24 16:50:35 +0200 | [diff] [blame] | 1541 | } |
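| | |
| | /* |
| | * Illustrative use, advancing the state machine while verifying the |
| | * previous state (the values are defined in postcopy-ram.h): |
| | * |
| | *   PostcopyState old = postcopy_state_set(POSTCOPY_INCOMING_DISCARD); |
| | *   assert(old == POSTCOPY_INCOMING_ADVISE); |
| | */ |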
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1542 | |
| 1543 | /* Register a handler for external shared memory postcopy |
| 1544 | * called on the destination. |
| 1545 | */ |
| 1546 | void postcopy_register_shared_ufd(struct PostCopyFD *pcfd) |
| 1547 | { |
| 1548 | MigrationIncomingState *mis = migration_incoming_get_current(); |
| 1549 | |
| 1550 | mis->postcopy_remote_fds = g_array_append_val(mis->postcopy_remote_fds, |
| 1551 | *pcfd); |
| 1552 | } |
| 1553 | |
| 1554 | /* Unregister a handler for external shared memory postcopy |
| 1555 | */ |
| 1556 | void postcopy_unregister_shared_ufd(struct PostCopyFD *pcfd) |
| 1557 | { |
| 1558 | guint i; |
| 1559 | MigrationIncomingState *mis = migration_incoming_get_current(); |
| 1560 | GArray *pcrfds = mis->postcopy_remote_fds; |
| 1561 | |
Juan Quintela | 5655998 | 2021-11-03 20:24:27 +0100 | [diff] [blame] | 1562 | if (!pcrfds) { |
| 1563 | /* migration has already finished and freed the array */ |
| 1564 | return; |
| 1565 | } |
Dr. David Alan Gilbert | 00fa4fc | 2018-03-12 17:21:04 +0000 | [diff] [blame] | 1566 | for (i = 0; i < pcrfds->len; i++) { |
| 1567 | struct PostCopyFD *cur = &g_array_index(pcrfds, struct PostCopyFD, i); |
| 1568 | if (cur->fd == pcfd->fd) { |
| 1569 | mis->postcopy_remote_fds = g_array_remove_index(pcrfds, i); |
| 1570 | return; |
| 1571 | } |
| 1572 | } |
| 1573 | } |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1574 | |
manish.mishra | 6720c2b | 2022-12-20 18:44:18 +0000 | [diff] [blame] | 1575 | void postcopy_preempt_new_channel(MigrationIncomingState *mis, QEMUFile *file) |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1576 | { |
| 1577 | /* |
| 1578 | * The new loading channel has its own threads, so it needs to be |
| 1579 | * blocking too. It's blocking by default; just be explicit. |
| 1580 | */ |
| 1581 | qemu_file_set_blocking(file, true); |
| 1582 | mis->postcopy_qemufile_dst = file; |
Peter Xu | 5655aab | 2023-02-08 15:28:13 -0500 | [diff] [blame] | 1583 | qemu_sem_post(&mis->postcopy_qemufile_dst_done); |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1584 | trace_postcopy_preempt_new_channel(); |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1585 | } |
| 1586 | |
Peter Xu | f0afaf6 | 2022-07-07 14:55:18 -0400 | [diff] [blame] | 1587 | /* |
| 1588 | * Set up the postcopy preempt channel with the IOC. If ERROR is specified, |
| 1589 | * set the error instead. This helper will free the ERROR if specified. |
| 1590 | */ |
Peter Xu | d0edb8a | 2022-07-07 14:55:09 -0400 | [diff] [blame] | 1591 | static void |
Peter Xu | f0afaf6 | 2022-07-07 14:55:18 -0400 | [diff] [blame] | 1592 | postcopy_preempt_send_channel_done(MigrationState *s, |
| 1593 | QIOChannel *ioc, Error *local_err) |
Peter Xu | d0edb8a | 2022-07-07 14:55:09 -0400 | [diff] [blame] | 1594 | { |
Peter Xu | f0afaf6 | 2022-07-07 14:55:18 -0400 | [diff] [blame] | 1595 | if (local_err) { |
Peter Xu | d0edb8a | 2022-07-07 14:55:09 -0400 | [diff] [blame] | 1596 | migrate_set_error(s, local_err); |
| 1597 | error_free(local_err); |
| 1598 | } else { |
| 1599 | migration_ioc_register_yank(ioc); |
| 1600 | s->postcopy_qemufile_src = qemu_file_new_output(ioc); |
| 1601 | trace_postcopy_preempt_new_channel(); |
| 1602 | } |
| 1603 | |
| 1604 | /* |
| 1605 | * Kick the waiter in all cases. The waiter should check |
| 1606 | * postcopy_qemufile_src to know whether it failed or not. |
| 1607 | */ |
| 1608 | qemu_sem_post(&s->postcopy_qemufile_src_sem); |
Peter Xu | f0afaf6 | 2022-07-07 14:55:18 -0400 | [diff] [blame] | 1609 | } |
| 1610 | |
| 1611 | static void |
| 1612 | postcopy_preempt_tls_handshake(QIOTask *task, gpointer opaque) |
| 1613 | { |
| 1614 | g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task)); |
| 1615 | MigrationState *s = opaque; |
| 1616 | Error *local_err = NULL; |
| 1617 | |
| 1618 | qio_task_propagate_error(task, &local_err); |
| 1619 | postcopy_preempt_send_channel_done(s, ioc, local_err); |
| 1620 | } |
| 1621 | |
| 1622 | static void |
| 1623 | postcopy_preempt_send_channel_new(QIOTask *task, gpointer opaque) |
| 1624 | { |
| 1625 | g_autoptr(QIOChannel) ioc = QIO_CHANNEL(qio_task_get_source(task)); |
| 1626 | MigrationState *s = opaque; |
| 1627 | QIOChannelTLS *tioc; |
| 1628 | Error *local_err = NULL; |
| 1629 | |
| 1630 | if (qio_task_propagate_error(task, &local_err)) { |
| 1631 | goto out; |
| 1632 | } |
| 1633 | |
| 1634 | if (migrate_channel_requires_tls_upgrade(ioc)) { |
Juan Quintela | 0deb7e9 | 2023-04-26 21:33:08 +0200 | [diff] [blame] | 1635 | tioc = migration_tls_client_create(ioc, s->hostname, &local_err); |
Peter Xu | f0afaf6 | 2022-07-07 14:55:18 -0400 | [diff] [blame] | 1636 | if (!tioc) { |
| 1637 | goto out; |
| 1638 | } |
| 1639 | trace_postcopy_preempt_tls_handshake(); |
| 1640 | qio_channel_set_name(QIO_CHANNEL(tioc), "migration-tls-preempt"); |
| 1641 | qio_channel_tls_handshake(tioc, postcopy_preempt_tls_handshake, |
| 1642 | s, NULL, NULL); |
| 1643 | /* Channel setup continues in the callback once the TLS handshake finishes */ |
| 1644 | return; |
| 1645 | } |
| 1646 | |
| 1647 | out: |
| 1648 | /* This handles both good and error cases */ |
| 1649 | postcopy_preempt_send_channel_done(s, ioc, local_err); |
Peter Xu | d0edb8a | 2022-07-07 14:55:09 -0400 | [diff] [blame] | 1650 | } |
| 1651 | |
Peter Xu | 5655aab | 2023-02-08 15:28:13 -0500 | [diff] [blame] | 1652 | /* |
| 1653 | * This function will kick off an async task to establish the preempt |
| 1654 | * channel, and wait until the connection setup has completed. Returns 0 |
| 1655 | * if the channel is established, -1 on error. |
| 1656 | */ |
| 1657 | int postcopy_preempt_establish_channel(MigrationState *s) |
Peter Xu | d0edb8a | 2022-07-07 14:55:09 -0400 | [diff] [blame] | 1658 | { |
| 1659 | /* If preempt not enabled, no need to wait */ |
| 1660 | if (!migrate_postcopy_preempt()) { |
| 1661 | return 0; |
| 1662 | } |
| 1663 | |
Peter Xu | 06064a6 | 2023-03-26 13:25:40 -0400 | [diff] [blame] | 1664 | /* |
| 1665 | * Kick off an async task to establish the preempt channel. Only do so |
| 1666 | * for 8.0+ machine types, because 7.1/7.2 require the channel to be |
| 1667 | * created in the setup phase of migration (even if racy on an unreliable network). |
| 1668 | */ |
| 1669 | if (!s->preempt_pre_7_2) { |
| 1670 | postcopy_preempt_setup(s); |
| 1671 | } |
Peter Xu | 5655aab | 2023-02-08 15:28:13 -0500 | [diff] [blame] | 1672 | |
Peter Xu | d0edb8a | 2022-07-07 14:55:09 -0400 | [diff] [blame] | 1673 | /* |
| 1674 | * We need the postcopy preempt channel to be established before |
| 1675 | * we start doing anything. |
| 1676 | */ |
| 1677 | qemu_sem_wait(&s->postcopy_qemufile_src_sem); |
| 1678 | |
| 1679 | return s->postcopy_qemufile_src ? 0 : -1; |
| 1680 | } |
| 1681 | |
Peter Xu | fc063a7 | 2023-02-08 15:28:11 -0500 | [diff] [blame] | 1682 | void postcopy_preempt_setup(MigrationState *s) |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1683 | { |
Peter Xu | d0edb8a | 2022-07-07 14:55:09 -0400 | [diff] [blame] | 1684 | /* Kick an async task to connect */ |
| 1685 | socket_send_channel_create(postcopy_preempt_send_channel_new, s); |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1686 | } |
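| | |
| | /* |
| | * The async flow started above: socket_send_channel_create() calls |
| | * back into postcopy_preempt_send_channel_new(), which optionally |
| | * upgrades the channel to TLS, and ends in |
| | * postcopy_preempt_send_channel_done(), which posts |
| | * postcopy_qemufile_src_sem for postcopy_preempt_establish_channel() |
| | * to wake up on. |
| | */ |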
| 1687 | |
Peter Xu | 60bb3c5 | 2022-07-07 14:55:06 -0400 | [diff] [blame] | 1688 | static void postcopy_pause_ram_fast_load(MigrationIncomingState *mis) |
| 1689 | { |
| 1690 | trace_postcopy_pause_fast_load(); |
| 1691 | qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); |
| 1692 | qemu_sem_wait(&mis->postcopy_pause_sem_fast_load); |
| 1693 | qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); |
| 1694 | trace_postcopy_pause_fast_load_continued(); |
| 1695 | } |
| 1696 | |
Peter Xu | 6621883 | 2023-03-26 13:25:39 -0400 | [diff] [blame] | 1697 | static bool preempt_thread_should_run(MigrationIncomingState *mis) |
| 1698 | { |
| 1699 | return mis->preempt_thread_status != PREEMPT_THREAD_QUIT; |
| 1700 | } |
| 1701 | |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1702 | void *postcopy_preempt_thread(void *opaque) |
| 1703 | { |
| 1704 | MigrationIncomingState *mis = opaque; |
| 1705 | int ret; |
| 1706 | |
| 1707 | trace_postcopy_preempt_thread_entry(); |
| 1708 | |
| 1709 | rcu_register_thread(); |
| 1710 | |
| 1711 | qemu_sem_post(&mis->thread_sync_sem); |
| 1712 | |
Peter Xu | a5d35dc | 2023-03-14 16:54:29 -0400 | [diff] [blame] | 1713 | /* |
| 1714 | * The preempt channel is established asynchronously. Wait |
| 1715 | * for its completion. |
| 1716 | */ |
| 1717 | qemu_sem_wait(&mis->postcopy_qemufile_dst_done); |
| 1718 | |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1719 | /* Sending RAM_SAVE_FLAG_EOS to terminate this thread */ |
Peter Xu | 60bb3c5 | 2022-07-07 14:55:06 -0400 | [diff] [blame] | 1720 | qemu_mutex_lock(&mis->postcopy_prio_thread_mutex); |
Peter Xu | 6621883 | 2023-03-26 13:25:39 -0400 | [diff] [blame] | 1721 | while (preempt_thread_should_run(mis)) { |
Peter Xu | 60bb3c5 | 2022-07-07 14:55:06 -0400 | [diff] [blame] | 1722 | ret = ram_load_postcopy(mis->postcopy_qemufile_dst, |
| 1723 | RAM_CHANNEL_POSTCOPY); |
| 1724 | /* If an error happened, go into the recovery routine */ |
Peter Xu | 6621883 | 2023-03-26 13:25:39 -0400 | [diff] [blame] | 1725 | if (ret && preempt_thread_should_run(mis)) { |
Peter Xu | 60bb3c5 | 2022-07-07 14:55:06 -0400 | [diff] [blame] | 1726 | postcopy_pause_ram_fast_load(mis); |
| 1727 | } else { |
| 1728 | /* We're done */ |
| 1729 | break; |
| 1730 | } |
| 1731 | } |
| 1732 | qemu_mutex_unlock(&mis->postcopy_prio_thread_mutex); |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1733 | |
| 1734 | rcu_unregister_thread(); |
| 1735 | |
| 1736 | trace_postcopy_preempt_thread_exit(); |
| 1737 | |
Peter Xu | 60bb3c5 | 2022-07-07 14:55:06 -0400 | [diff] [blame] | 1738 | return NULL; |
Peter Xu | 36f62f1 | 2022-07-07 14:55:02 -0400 | [diff] [blame] | 1739 | } |