/*
 * QEMU live migration
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */
16#include "qemu/osdep.h"
17#include "qemu/cutils.h"
18#include "qemu/error-report.h"
19#include "qemu/main-loop.h"
20#include "migration/blocker.h"
21#include "exec.h"
22#include "fd.h"
23#include "socket.h"
24#include "sysemu/runstate.h"
25#include "sysemu/sysemu.h"
26#include "sysemu/cpu-throttle.h"
27#include "rdma.h"
28#include "ram.h"
29#include "migration/global_state.h"
30#include "migration/misc.h"
31#include "migration.h"
32#include "savevm.h"
33#include "qemu-file.h"
34#include "channel.h"
35#include "migration/vmstate.h"
36#include "block/block.h"
37#include "qapi/error.h"
38#include "qapi/clone-visitor.h"
39#include "qapi/qapi-visit-migration.h"
40#include "qapi/qapi-visit-sockets.h"
41#include "qapi/qapi-commands-migration.h"
42#include "qapi/qapi-events-migration.h"
43#include "qapi/qmp/qerror.h"
44#include "qapi/qmp/qnull.h"
45#include "qemu/rcu.h"
46#include "block.h"
47#include "postcopy-ram.h"
48#include "qemu/thread.h"
49#include "trace.h"
50#include "exec/target_page.h"
51#include "io/channel-buffer.h"
52#include "io/channel-tls.h"
53#include "migration/colo.h"
54#include "hw/boards.h"
55#include "hw/qdev-properties.h"
56#include "hw/qdev-properties-system.h"
57#include "monitor/monitor.h"
58#include "net/announce.h"
59#include "qemu/queue.h"
60#include "multifd.h"
61#include "threadinfo.h"
62#include "qemu/yank.h"
63#include "sysemu/cpus.h"
64#include "yank_functions.h"
65#include "sysemu/qtest.h"
66#include "ui/qemu-spice.h"
67
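/* Default value for the max-bandwidth migration parameter: 128 MiB/s */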
68#define MAX_THROTTLE (128 << 20)
69
70
71
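/*
 * Amount of time (in milliseconds) to allocate to each "chunk" of
 * bandwidth-throttled data; XFER_LIMIT_RATIO converts a bytes/second
 * limit into the per-period budget used by the rate limiter.
 */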
72#define BUFFER_DELAY 100
73#define XFER_LIMIT_RATIO (1000 / BUFFER_DELAY)
74
75
76
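/* Default downtime limit (in milliseconds) for the final switchover phase */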
77#define DEFAULT_MIGRATE_SET_DOWNTIME 300
78
79
80#define MAX_MIGRATE_DOWNTIME_SECONDS 2000
81#define MAX_MIGRATE_DOWNTIME (MAX_MIGRATE_DOWNTIME_SECONDS * 1000)
82
83
84#define DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT 8
85
86
87#define DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT 2
88
89#define DEFAULT_MIGRATE_COMPRESS_LEVEL 1
90
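/* Defaults for auto-converge CPU throttling; all values are percentages */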
91#define DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD 50
92#define DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL 20
93#define DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT 10
94#define DEFAULT_MIGRATE_MAX_CPU_THROTTLE 99
95
96
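/* Migration XBZRLE default cache size: 64 MiB */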
97#define DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE (64 * 1024 * 1024)
98
99
100#define DEFAULT_MIGRATE_X_CHECKPOINT_DELAY (200 * 100)
101#define DEFAULT_MIGRATE_MULTIFD_CHANNELS 2
102#define DEFAULT_MIGRATE_MULTIFD_COMPRESSION MULTIFD_COMPRESSION_NONE
103
104#define DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL 1
105
106#define DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL 1
107
108
109
110
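/* Background transfer rate for postcopy; 0 means unlimited */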
111#define DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH 0
112
113
114
115
116
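/*
 * Defaults for the self-announcement sent after migration; initial/max/step
 * are delays in milliseconds, rounds is the number of announcements.
 */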
117#define DEFAULT_MIGRATE_ANNOUNCE_INITIAL 50
118#define DEFAULT_MIGRATE_ANNOUNCE_MAX 550
119#define DEFAULT_MIGRATE_ANNOUNCE_ROUNDS 5
120#define DEFAULT_MIGRATE_ANNOUNCE_STEP 100
121
122static NotifierList migration_state_notifiers =
123 NOTIFIER_LIST_INITIALIZER(migration_state_notifiers);
124
125
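/* Messages sent on the return path from destination to source */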
126enum mig_rp_message_type {
127 MIG_RP_MSG_INVALID = 0,
128 MIG_RP_MSG_SHUT,
129 MIG_RP_MSG_PONG,
130
131 MIG_RP_MSG_REQ_PAGES_ID,
132 MIG_RP_MSG_REQ_PAGES,
133 MIG_RP_MSG_RECV_BITMAP,
134 MIG_RP_MSG_RESUME_ACK,
135
136 MIG_RP_MSG_MAX
137};
138
139
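/* A fixed-size set of migration capabilities, filled in at definition time */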
140struct MigrateCapsSet {
141 int size;
142 MigrationCapability caps[];
143};
144typedef struct MigrateCapsSet MigrateCapsSet;
145
146
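/* Define and initialize a MigrateCapsSet from a list of capabilities */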
147#define INITIALIZE_MIGRATE_CAPS_SET(_name, ...) \
148 MigrateCapsSet _name = { \
149 .size = sizeof((int []) { __VA_ARGS__ }) / sizeof(int), \
150 .caps = { __VA_ARGS__ } \
151 }
152
153
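/* Capabilities that are not compatible with background snapshots */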
154static const
155INITIALIZE_MIGRATE_CAPS_SET(check_caps_background_snapshot,
156 MIGRATION_CAPABILITY_POSTCOPY_RAM,
157 MIGRATION_CAPABILITY_DIRTY_BITMAPS,
158 MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME,
159 MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE,
160 MIGRATION_CAPABILITY_RETURN_PATH,
161 MIGRATION_CAPABILITY_MULTIFD,
162 MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER,
163 MIGRATION_CAPABILITY_AUTO_CONVERGE,
164 MIGRATION_CAPABILITY_RELEASE_RAM,
165 MIGRATION_CAPABILITY_RDMA_PIN_ALL,
166 MIGRATION_CAPABILITY_COMPRESS,
167 MIGRATION_CAPABILITY_XBZRLE,
168 MIGRATION_CAPABILITY_X_COLO,
169 MIGRATION_CAPABILITY_VALIDATE_UUID,
170 MIGRATION_CAPABILITY_ZERO_COPY_SEND);
171
172
173
174
175
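/* The single instances of the outgoing and incoming migration state */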
176static MigrationState *current_migration;
177static MigrationIncomingState *current_incoming;
178
179static GSList *migration_blockers;
180
181static bool migration_object_check(MigrationState *ms, Error **errp);
182static int migration_maybe_pause(MigrationState *s,
183 int *current_active_state,
184 int new_state);
185static void migrate_fd_cancel(MigrationState *s);
186
187static bool migration_needs_multiple_sockets(void)
188{
189 return migrate_use_multifd() || migrate_postcopy_preempt();
190}
191
192static bool uri_supports_multi_channels(const char *uri)
193{
194 return strstart(uri, "tcp:", NULL) || strstart(uri, "unix:", NULL) ||
195 strstart(uri, "vsock:", NULL);
196}
197
198static bool
199migration_channels_and_uri_compatible(const char *uri, Error **errp)
200{
201 if (migration_needs_multiple_sockets() &&
202 !uri_supports_multi_channels(uri)) {
203 error_setg(errp, "Migration requires multi-channel URIs (e.g. tcp)");
204 return false;
205 }
206
207 return true;
208}
209
210static gint page_request_addr_cmp(gconstpointer ap, gconstpointer bp)
211{
212 uintptr_t a = (uintptr_t) ap, b = (uintptr_t) bp;
213
214 return (a > b) - (a < b);
215}
216
217void migration_object_init(void)
218{
219
220 assert(!current_migration);
221 current_migration = MIGRATION_OBJ(object_new(TYPE_MIGRATION));
222
223
224
225
226
227 assert(!current_incoming);
228 current_incoming = g_new0(MigrationIncomingState, 1);
229 current_incoming->state = MIGRATION_STATUS_NONE;
230 current_incoming->postcopy_remote_fds =
231 g_array_new(FALSE, TRUE, sizeof(struct PostCopyFD));
    qemu_mutex_init(&current_incoming->rp_mutex);
    qemu_mutex_init(&current_incoming->postcopy_prio_thread_mutex);
    qemu_event_init(&current_incoming->main_thread_load_event, false);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_dst, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fault, 0);
    qemu_sem_init(&current_incoming->postcopy_pause_sem_fast_load, 0);
    qemu_sem_init(&current_incoming->postcopy_qemufile_dst_done, 0);
239
    qemu_mutex_init(&current_incoming->page_request_mutex);
241 current_incoming->page_requested = g_tree_new(page_request_addr_cmp);
242
243 migration_object_check(current_migration, &error_fatal);
244
245 blk_mig_init();
246 ram_mig_init();
247 dirty_bitmap_mig_init();
248}
249
250void migration_cancel(const Error *error)
251{
252 if (error) {
253 migrate_set_error(current_migration, error);
254 }
255 migrate_fd_cancel(current_migration);
256}
257
258void migration_shutdown(void)
259{
260
261
262
263
264
265 colo_shutdown();
266
267
268
269
270 migration_cancel(NULL);
271 object_unref(OBJECT(current_migration));
272
273
274
275
276
277 dirty_bitmap_mig_cancel_outgoing();
278
279
280
281
282
283
284 dirty_bitmap_mig_cancel_incoming();
285}
286
287
288MigrationState *migrate_get_current(void)
289{
290
291 assert(current_migration);
292 return current_migration;
293}
294
295MigrationIncomingState *migration_incoming_get_current(void)
296{
297 assert(current_incoming);
298 return current_incoming;
299}
300
301void migration_incoming_transport_cleanup(MigrationIncomingState *mis)
302{
303 if (mis->socket_address_list) {
304 qapi_free_SocketAddressList(mis->socket_address_list);
305 mis->socket_address_list = NULL;
306 }
307
308 if (mis->transport_cleanup) {
309 mis->transport_cleanup(mis->transport_data);
310 mis->transport_data = mis->transport_cleanup = NULL;
311 }
312}
313
314void migration_incoming_state_destroy(void)
315{
316 struct MigrationIncomingState *mis = migration_incoming_get_current();
317
318 multifd_load_cleanup();
319
320 if (mis->to_src_file) {
321
322 migrate_send_rp_shut(mis, qemu_file_get_error(mis->from_src_file) != 0);
323 qemu_fclose(mis->to_src_file);
324 mis->to_src_file = NULL;
325 }
326
327 if (mis->from_src_file) {
328 migration_ioc_unregister_yank_from_file(mis->from_src_file);
329 qemu_fclose(mis->from_src_file);
330 mis->from_src_file = NULL;
331 }
332 if (mis->postcopy_remote_fds) {
333 g_array_free(mis->postcopy_remote_fds, TRUE);
334 mis->postcopy_remote_fds = NULL;
335 }
336
337 migration_incoming_transport_cleanup(mis);
338 qemu_event_reset(&mis->main_thread_load_event);
339
340 if (mis->page_requested) {
341 g_tree_destroy(mis->page_requested);
342 mis->page_requested = NULL;
343 }
344
345 if (mis->postcopy_qemufile_dst) {
346 migration_ioc_unregister_yank_from_file(mis->postcopy_qemufile_dst);
347 qemu_fclose(mis->postcopy_qemufile_dst);
348 mis->postcopy_qemufile_dst = NULL;
349 }
350
351 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
352}
353
354static void migrate_generate_event(int new_state)
355{
356 if (migrate_use_events()) {
357 qapi_event_send_migration(new_state);
358 }
359}
360
361static bool migrate_late_block_activate(void)
362{
363 MigrationState *s;
364
365 s = migrate_get_current();
366
367 return s->enabled_capabilities[
368 MIGRATION_CAPABILITY_LATE_BLOCK_ACTIVATE];
369}
370
371
372
373
374
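/*
 * Send a message on the return path back to the source of the migration;
 * returns 0 on success or a negative errno when the return path is gone.
 */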
375static int migrate_send_rp_message(MigrationIncomingState *mis,
376 enum mig_rp_message_type message_type,
377 uint16_t len, void *data)
378{
379 int ret = 0;
380
381 trace_migrate_send_rp_message((int)message_type, len);
382 QEMU_LOCK_GUARD(&mis->rp_mutex);
383
384
385
386
387
388 if (!mis->to_src_file) {
389 ret = -EIO;
390 return ret;
391 }
392
393 qemu_put_be16(mis->to_src_file, (unsigned int)message_type);
394 qemu_put_be16(mis->to_src_file, len);
395 qemu_put_buffer(mis->to_src_file, data, len);
396 qemu_fflush(mis->to_src_file);
397
398
399 ret = qemu_file_get_error(mis->to_src_file);
400
401 return ret;
402}
403
404
405
406
407
408
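/*
 * Request one page from the source VM at the given start address.
 * The message payload is: be64 start offset, be32 length, and (only when
 * the RAMBlock changed since the last request) a length-prefixed block name.
 */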
409int migrate_send_rp_message_req_pages(MigrationIncomingState *mis,
410 RAMBlock *rb, ram_addr_t start)
411{
412 uint8_t bufc[12 + 1 + 255];
413 size_t msglen = 12;
414 size_t len = qemu_ram_pagesize(rb);
415 enum mig_rp_message_type msg_type;
416 const char *rbname;
417 int rbname_len;
418
419 *(uint64_t *)bufc = cpu_to_be64((uint64_t)start);
420 *(uint32_t *)(bufc + 8) = cpu_to_be32((uint32_t)len);
421
422
423
424
425
426
427 if (rb != mis->last_rb) {
428 mis->last_rb = rb;
429
430 rbname = qemu_ram_get_idstr(rb);
431 rbname_len = strlen(rbname);
432
433 assert(rbname_len < 256);
434
435 bufc[msglen++] = rbname_len;
436 memcpy(bufc + msglen, rbname, rbname_len);
437 msglen += rbname_len;
438 msg_type = MIG_RP_MSG_REQ_PAGES_ID;
439 } else {
440 msg_type = MIG_RP_MSG_REQ_PAGES;
441 }
442
443 return migrate_send_rp_message(mis, msg_type, msglen, bufc);
444}
445
446int migrate_send_rp_req_pages(MigrationIncomingState *mis,
447 RAMBlock *rb, ram_addr_t start, uint64_t haddr)
448{
449 void *aligned = (void *)(uintptr_t)ROUND_DOWN(haddr, qemu_ram_pagesize(rb));
450 bool received = false;
451
452 WITH_QEMU_LOCK_GUARD(&mis->page_request_mutex) {
453 received = ramblock_recv_bitmap_test_byte_offset(rb, start);
454 if (!received && !g_tree_lookup(mis->page_requested, aligned)) {
455
456
457
458
459
460 g_tree_insert(mis->page_requested, aligned, (gpointer)1);
461 mis->page_requested_count++;
462 trace_postcopy_page_req_add(aligned, mis->page_requested_count);
463 }
464 }
465
466
467
468
469
470 if (received) {
471 return 0;
472 }
473
474 return migrate_send_rp_message_req_pages(mis, rb, start);
475}
476
477static bool migration_colo_enabled;
478bool migration_incoming_colo_enabled(void)
479{
480 return migration_colo_enabled;
481}
482
483void migration_incoming_disable_colo(void)
484{
485 ram_block_discard_disable(false);
486 migration_colo_enabled = false;
487}
488
489int migration_incoming_enable_colo(void)
490{
491 if (ram_block_discard_disable(true)) {
492 error_report("COLO: cannot disable RAM discard");
493 return -EBUSY;
494 }
495 migration_colo_enabled = true;
496 return 0;
497}
498
499void migrate_add_address(SocketAddress *address)
500{
501 MigrationIncomingState *mis = migration_incoming_get_current();
502
503 QAPI_LIST_PREPEND(mis->socket_address_list,
504 QAPI_CLONE(SocketAddress, address));
505}
506
507static void qemu_start_incoming_migration(const char *uri, Error **errp)
508{
509 const char *p = NULL;
510
511
512 if (!migration_channels_and_uri_compatible(uri, errp)) {
513 return;
514 }
515
516 qapi_event_send_migration(MIGRATION_STATUS_SETUP);
517 if (strstart(uri, "tcp:", &p) ||
518 strstart(uri, "unix:", NULL) ||
519 strstart(uri, "vsock:", NULL)) {
520 socket_start_incoming_migration(p ? p : uri, errp);
521#ifdef CONFIG_RDMA
522 } else if (strstart(uri, "rdma:", &p)) {
523 rdma_start_incoming_migration(p, errp);
524#endif
525 } else if (strstart(uri, "exec:", &p)) {
526 exec_start_incoming_migration(p, errp);
527 } else if (strstart(uri, "fd:", &p)) {
528 fd_start_incoming_migration(p, errp);
529 } else {
530 error_setg(errp, "unknown migration protocol: %s", uri);
531 }
532}
533
534static void process_incoming_migration_bh(void *opaque)
535{
536 Error *local_err = NULL;
537 MigrationIncomingState *mis = opaque;
538
539
540
541
542
543
544
545 if (!migrate_late_block_activate() ||
546 (autostart && (!global_state_received() ||
547 global_state_get_runstate() == RUN_STATE_RUNNING))) {
548
549
550 bdrv_activate_all(&local_err);
551 if (local_err) {
552 error_report_err(local_err);
553 local_err = NULL;
554 autostart = false;
555 }
556 }
557
558
559
560
561
562 qemu_announce_self(&mis->announce_timer, migrate_announce_params());
563
564 multifd_load_shutdown();
565
566 dirty_bitmap_mig_before_vm_start();
567
568 if (!global_state_received() ||
569 global_state_get_runstate() == RUN_STATE_RUNNING) {
570 if (autostart) {
571 vm_start();
572 } else {
573 runstate_set(RUN_STATE_PAUSED);
574 }
575 } else if (migration_incoming_colo_enabled()) {
576 migration_incoming_disable_colo();
577 vm_start();
578 } else {
579 runstate_set(global_state_get_runstate());
580 }
581
582
583
584
585
586 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
587 MIGRATION_STATUS_COMPLETED);
588 qemu_bh_delete(mis->bh);
589 migration_incoming_state_destroy();
590}
591
592static void coroutine_fn
593process_incoming_migration_co(void *opaque)
594{
595 MigrationIncomingState *mis = migration_incoming_get_current();
596 PostcopyState ps;
597 int ret;
598 Error *local_err = NULL;
599
600 assert(mis->from_src_file);
601 mis->migration_incoming_co = qemu_coroutine_self();
602 mis->largest_page_size = qemu_ram_pagesize_largest();
603 postcopy_state_set(POSTCOPY_INCOMING_NONE);
604 migrate_set_state(&mis->state, MIGRATION_STATUS_NONE,
605 MIGRATION_STATUS_ACTIVE);
606 ret = qemu_loadvm_state(mis->from_src_file);
607
608 ps = postcopy_state_get();
609 trace_process_incoming_migration_co_end(ret, ps);
610 if (ps != POSTCOPY_INCOMING_NONE) {
611 if (ps == POSTCOPY_INCOMING_ADVISE) {
612
613
614
615
616
617 postcopy_ram_incoming_cleanup(mis);
618 } else if (ret >= 0) {
619
620
621
622
623 trace_process_incoming_migration_co_postcopy_end_main();
624 return;
625 }
626
627 }
628
629
630 if (!ret && migration_incoming_colo_enabled()) {
631
632 bdrv_activate_all(&local_err);
633 if (local_err) {
634 error_report_err(local_err);
635 goto fail;
636 }
637
638 qemu_thread_create(&mis->colo_incoming_thread, "COLO incoming",
639 colo_process_incoming_thread, mis, QEMU_THREAD_JOINABLE);
640 mis->have_colo_incoming_thread = true;
641 qemu_coroutine_yield();
642
643 qemu_mutex_unlock_iothread();
644
645 qemu_thread_join(&mis->colo_incoming_thread);
646 qemu_mutex_lock_iothread();
647
648 colo_release_ram_cache();
649 }
650
651 if (ret < 0) {
652 error_report("load of migration failed: %s", strerror(-ret));
653 goto fail;
654 }
655 mis->bh = qemu_bh_new(process_incoming_migration_bh, mis);
656 qemu_bh_schedule(mis->bh);
657 mis->migration_incoming_co = NULL;
658 return;
659fail:
660 local_err = NULL;
661 migrate_set_state(&mis->state, MIGRATION_STATUS_ACTIVE,
662 MIGRATION_STATUS_FAILED);
663 qemu_fclose(mis->from_src_file);
664
665 multifd_load_cleanup();
666
667 exit(EXIT_FAILURE);
668}
669
670
671
672
673
674
675
676
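/*
 * Install @f as the main incoming channel (if one is not set already) and
 * switch it to non-blocking mode; returns true when the setup succeeded.
 */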
677static bool migration_incoming_setup(QEMUFile *f, Error **errp)
678{
679 MigrationIncomingState *mis = migration_incoming_get_current();
680
681 if (!mis->from_src_file) {
682 mis->from_src_file = f;
683 }
684 qemu_file_set_blocking(f, false);
685 return true;
686}
687
688void migration_incoming_process(void)
689{
690 Coroutine *co = qemu_coroutine_create(process_incoming_migration_co, NULL);
691 qemu_coroutine_enter(co);
692}
693
694
695static bool postcopy_try_recover(void)
696{
697 MigrationIncomingState *mis = migration_incoming_get_current();
698
699 if (mis->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
700
701
702
703 assert(mis->from_src_file);
704
705 qemu_file_set_blocking(mis->from_src_file, true);
706
707
708 mis->to_src_file = qemu_file_get_return_path(mis->from_src_file);
709
710 migrate_set_state(&mis->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
711 MIGRATION_STATUS_POSTCOPY_RECOVER);
712
713
714
715
716
717
718
719
720 qemu_sem_post(&mis->postcopy_pause_sem_dst);
721 return true;
722 }
723
724 return false;
725}
726
727void migration_fd_process_incoming(QEMUFile *f, Error **errp)
728{
729 if (!migration_incoming_setup(f, errp)) {
730 return;
731 }
732 if (postcopy_try_recover()) {
733 return;
734 }
735 migration_incoming_process();
736}
737
738
739
740
741
742static bool migration_should_start_incoming(bool main_channel)
743{
744
745 if (migrate_use_multifd()) {
746 return migration_has_all_channels();
747 }
748
749
750 if (migrate_postcopy_preempt()) {
751 return main_channel;
752 }
753
754
755
756
757
758
759 assert(main_channel);
760 return true;
761}
762
763void migration_ioc_process_incoming(QIOChannel *ioc, Error **errp)
764{
765 MigrationIncomingState *mis = migration_incoming_get_current();
766 Error *local_err = NULL;
767 QEMUFile *f;
768 bool default_channel = true;
769 uint32_t channel_magic = 0;
770 int ret = 0;
771
772 if (migrate_use_multifd() && !migrate_postcopy_ram() &&
773 qio_channel_has_feature(ioc, QIO_CHANNEL_FEATURE_READ_MSG_PEEK)) {
774
775
776
777
778
779
780
781
782
783
784 ret = migration_channel_read_peek(ioc, (void *)&channel_magic,
785 sizeof(channel_magic), &local_err);
786
787 if (ret != 0) {
788 error_propagate(errp, local_err);
789 return;
790 }
791
792 default_channel = (channel_magic == cpu_to_be32(QEMU_VM_FILE_MAGIC));
793 } else {
794 default_channel = !mis->from_src_file;
795 }
796
797 if (multifd_load_setup(errp) != 0) {
798 error_setg(errp, "Failed to setup multifd channels");
799 return;
800 }
801
802 if (default_channel) {
803 f = qemu_file_new_input(ioc);
804
805 if (!migration_incoming_setup(f, errp)) {
806 return;
807 }
808 } else {
809
810 assert(migration_needs_multiple_sockets());
811 if (migrate_use_multifd()) {
812 multifd_recv_new_channel(ioc, &local_err);
813 } else {
814 assert(migrate_postcopy_preempt());
815 f = qemu_file_new_input(ioc);
816 postcopy_preempt_new_channel(mis, f);
817 }
818 if (local_err) {
819 error_propagate(errp, local_err);
820 return;
821 }
822 }
823
824 if (migration_should_start_incoming(default_channel)) {
825
826 if (postcopy_try_recover()) {
827 return;
828 }
829 migration_incoming_process();
830 }
831}
832
833
834
835
836
837
838
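/*
 * Returns true when we have got connections to all the channels that
 * we need for migration.
 */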
839bool migration_has_all_channels(void)
840{
841 MigrationIncomingState *mis = migration_incoming_get_current();
842
843 if (!mis->from_src_file) {
844 return false;
845 }
846
847 if (migrate_use_multifd()) {
848 return multifd_recv_all_channels_created();
849 }
850
851 if (migrate_postcopy_preempt()) {
852 return mis->postcopy_qemufile_dst != NULL;
853 }
854
855 return true;
856}
857
858
859
860
861
862
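/*
 * Send a 'SHUT' message on the return channel with the given value to
 * indicate that we have finished with the return path; non-zero means error.
 */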
863void migrate_send_rp_shut(MigrationIncomingState *mis,
864 uint32_t value)
865{
866 uint32_t buf;
867
868 buf = cpu_to_be32(value);
869 migrate_send_rp_message(mis, MIG_RP_MSG_SHUT, sizeof(buf), &buf);
870}
871
872
873
874
875
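/*
 * Send a 'PONG' message on the return channel with the given value,
 * normally in response to a 'PING'.
 */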
876void migrate_send_rp_pong(MigrationIncomingState *mis,
877 uint32_t value)
878{
879 uint32_t buf;
880
881 buf = cpu_to_be32(value);
882 migrate_send_rp_message(mis, MIG_RP_MSG_PONG, sizeof(buf), &buf);
883}
884
885void migrate_send_rp_recv_bitmap(MigrationIncomingState *mis,
886 char *block_name)
887{
888 char buf[512];
889 int len;
890 int64_t res;
891
892
893
894
895
896 len = strlen(block_name);
897 buf[0] = len;
898 memcpy(buf + 1, block_name, len);
899
900 if (mis->state != MIGRATION_STATUS_POSTCOPY_RECOVER) {
901 error_report("%s: MSG_RP_RECV_BITMAP only used for recovery",
902 __func__);
903 return;
904 }
905
906 migrate_send_rp_message(mis, MIG_RP_MSG_RECV_BITMAP, len + 1, buf);
907
908
909
910
911
912
913
914
915
916
917 qemu_mutex_lock(&mis->rp_mutex);
918 res = ramblock_recv_bitmap_send(mis->to_src_file, block_name);
919 qemu_mutex_unlock(&mis->rp_mutex);
920
921 trace_migrate_send_rp_recv_bitmap(block_name, res);
922}
923
924void migrate_send_rp_resume_ack(MigrationIncomingState *mis, uint32_t value)
925{
926 uint32_t buf;
927
928 buf = cpu_to_be32(value);
929 migrate_send_rp_message(mis, MIG_RP_MSG_RESUME_ACK, sizeof(buf), &buf);
930}
931
932MigrationCapabilityStatusList *qmp_query_migrate_capabilities(Error **errp)
933{
934 MigrationCapabilityStatusList *head = NULL, **tail = &head;
935 MigrationCapabilityStatus *caps;
936 MigrationState *s = migrate_get_current();
937 int i;
938
939 for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
940#ifndef CONFIG_LIVE_BLOCK_MIGRATION
941 if (i == MIGRATION_CAPABILITY_BLOCK) {
942 continue;
943 }
944#endif
945 caps = g_malloc0(sizeof(*caps));
946 caps->capability = i;
947 caps->state = s->enabled_capabilities[i];
948 QAPI_LIST_APPEND(tail, caps);
949 }
950
951 return head;
952}
953
954MigrationParameters *qmp_query_migrate_parameters(Error **errp)
955{
956 MigrationParameters *params;
957 MigrationState *s = migrate_get_current();
958
959
960 params = g_malloc0(sizeof(*params));
961 params->has_compress_level = true;
962 params->compress_level = s->parameters.compress_level;
963 params->has_compress_threads = true;
964 params->compress_threads = s->parameters.compress_threads;
965 params->has_compress_wait_thread = true;
966 params->compress_wait_thread = s->parameters.compress_wait_thread;
967 params->has_decompress_threads = true;
968 params->decompress_threads = s->parameters.decompress_threads;
969 params->has_throttle_trigger_threshold = true;
970 params->throttle_trigger_threshold = s->parameters.throttle_trigger_threshold;
971 params->has_cpu_throttle_initial = true;
972 params->cpu_throttle_initial = s->parameters.cpu_throttle_initial;
973 params->has_cpu_throttle_increment = true;
974 params->cpu_throttle_increment = s->parameters.cpu_throttle_increment;
975 params->has_cpu_throttle_tailslow = true;
976 params->cpu_throttle_tailslow = s->parameters.cpu_throttle_tailslow;
977 params->tls_creds = g_strdup(s->parameters.tls_creds);
978 params->tls_hostname = g_strdup(s->parameters.tls_hostname);
979 params->tls_authz = g_strdup(s->parameters.tls_authz ?
980 s->parameters.tls_authz : "");
981 params->has_max_bandwidth = true;
982 params->max_bandwidth = s->parameters.max_bandwidth;
983 params->has_downtime_limit = true;
984 params->downtime_limit = s->parameters.downtime_limit;
985 params->has_x_checkpoint_delay = true;
986 params->x_checkpoint_delay = s->parameters.x_checkpoint_delay;
987 params->has_block_incremental = true;
988 params->block_incremental = s->parameters.block_incremental;
989 params->has_multifd_channels = true;
990 params->multifd_channels = s->parameters.multifd_channels;
991 params->has_multifd_compression = true;
992 params->multifd_compression = s->parameters.multifd_compression;
993 params->has_multifd_zlib_level = true;
994 params->multifd_zlib_level = s->parameters.multifd_zlib_level;
995 params->has_multifd_zstd_level = true;
996 params->multifd_zstd_level = s->parameters.multifd_zstd_level;
997 params->has_xbzrle_cache_size = true;
998 params->xbzrle_cache_size = s->parameters.xbzrle_cache_size;
999 params->has_max_postcopy_bandwidth = true;
1000 params->max_postcopy_bandwidth = s->parameters.max_postcopy_bandwidth;
1001 params->has_max_cpu_throttle = true;
1002 params->max_cpu_throttle = s->parameters.max_cpu_throttle;
1003 params->has_announce_initial = true;
1004 params->announce_initial = s->parameters.announce_initial;
1005 params->has_announce_max = true;
1006 params->announce_max = s->parameters.announce_max;
1007 params->has_announce_rounds = true;
1008 params->announce_rounds = s->parameters.announce_rounds;
1009 params->has_announce_step = true;
1010 params->announce_step = s->parameters.announce_step;
1011
1012 if (s->parameters.has_block_bitmap_mapping) {
1013 params->has_block_bitmap_mapping = true;
1014 params->block_bitmap_mapping =
1015 QAPI_CLONE(BitmapMigrationNodeAliasList,
1016 s->parameters.block_bitmap_mapping);
1017 }
1018
1019 return params;
1020}
1021
1022void qmp_client_migrate_info(const char *protocol, const char *hostname,
1023 bool has_port, int64_t port,
1024 bool has_tls_port, int64_t tls_port,
1025 const char *cert_subject,
1026 Error **errp)
1027{
1028 if (strcmp(protocol, "spice") == 0) {
1029 if (!qemu_using_spice(errp)) {
1030 return;
1031 }
1032
1033 if (!has_port && !has_tls_port) {
1034 error_setg(errp, QERR_MISSING_PARAMETER, "port/tls-port");
1035 return;
1036 }
1037
1038 if (qemu_spice.migrate_info(hostname,
1039 has_port ? port : -1,
1040 has_tls_port ? tls_port : -1,
1041 cert_subject)) {
1042 error_setg(errp, "Could not set up display for migration");
1043 return;
1044 }
1045 return;
1046 }
1047
1048 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "protocol", "'spice'");
1049}
1050
1051AnnounceParameters *migrate_announce_params(void)
1052{
1053 static AnnounceParameters ap;
1054
1055 MigrationState *s = migrate_get_current();
1056
1057 ap.initial = s->parameters.announce_initial;
1058 ap.max = s->parameters.announce_max;
1059 ap.rounds = s->parameters.announce_rounds;
1060 ap.step = s->parameters.announce_step;
1061
    return &ap;
1063}
1064
1065
1066
1067
1068
1069bool migration_is_setup_or_active(int state)
1070{
1071 switch (state) {
1072 case MIGRATION_STATUS_ACTIVE:
1073 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1074 case MIGRATION_STATUS_POSTCOPY_PAUSED:
1075 case MIGRATION_STATUS_POSTCOPY_RECOVER:
1076 case MIGRATION_STATUS_SETUP:
1077 case MIGRATION_STATUS_PRE_SWITCHOVER:
1078 case MIGRATION_STATUS_DEVICE:
1079 case MIGRATION_STATUS_WAIT_UNPLUG:
1080 case MIGRATION_STATUS_COLO:
1081 return true;
1082
1083 default:
1084 return false;
1085
1086 }
1087}
1088
1089bool migration_is_running(int state)
1090{
1091 switch (state) {
1092 case MIGRATION_STATUS_ACTIVE:
1093 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1094 case MIGRATION_STATUS_POSTCOPY_PAUSED:
1095 case MIGRATION_STATUS_POSTCOPY_RECOVER:
1096 case MIGRATION_STATUS_SETUP:
1097 case MIGRATION_STATUS_PRE_SWITCHOVER:
1098 case MIGRATION_STATUS_DEVICE:
1099 case MIGRATION_STATUS_WAIT_UNPLUG:
1100 case MIGRATION_STATUS_CANCELLING:
1101 return true;
1102
1103 default:
1104 return false;
1105
1106 }
1107}
1108
1109static bool migrate_show_downtime(MigrationState *s)
1110{
1111 return (s->state == MIGRATION_STATUS_COMPLETED) || migration_in_postcopy();
1112}
1113
1114static void populate_time_info(MigrationInfo *info, MigrationState *s)
1115{
1116 info->has_status = true;
1117 info->has_setup_time = true;
1118 info->setup_time = s->setup_time;
1119
1120 if (s->state == MIGRATION_STATUS_COMPLETED) {
1121 info->has_total_time = true;
1122 info->total_time = s->total_time;
1123 } else {
1124 info->has_total_time = true;
1125 info->total_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) -
1126 s->start_time;
1127 }
1128
1129 if (migrate_show_downtime(s)) {
1130 info->has_downtime = true;
1131 info->downtime = s->downtime;
1132 } else {
1133 info->has_expected_downtime = true;
1134 info->expected_downtime = s->expected_downtime;
1135 }
1136}
1137
1138static void populate_ram_info(MigrationInfo *info, MigrationState *s)
1139{
1140 size_t page_size = qemu_target_page_size();
1141
1142 info->ram = g_malloc0(sizeof(*info->ram));
1143 info->ram->transferred = stat64_get(&ram_atomic_counters.transferred);
1144 info->ram->total = ram_bytes_total();
1145 info->ram->duplicate = stat64_get(&ram_atomic_counters.duplicate);
1146
1147 info->ram->skipped = 0;
1148 info->ram->normal = stat64_get(&ram_atomic_counters.normal);
1149 info->ram->normal_bytes = info->ram->normal * page_size;
1150 info->ram->mbps = s->mbps;
1151 info->ram->dirty_sync_count = ram_counters.dirty_sync_count;
1152 info->ram->dirty_sync_missed_zero_copy =
1153 ram_counters.dirty_sync_missed_zero_copy;
1154 info->ram->postcopy_requests = ram_counters.postcopy_requests;
1155 info->ram->page_size = page_size;
1156 info->ram->multifd_bytes = ram_counters.multifd_bytes;
1157 info->ram->pages_per_second = s->pages_per_second;
1158 info->ram->precopy_bytes = ram_counters.precopy_bytes;
1159 info->ram->downtime_bytes = ram_counters.downtime_bytes;
1160 info->ram->postcopy_bytes = stat64_get(&ram_atomic_counters.postcopy_bytes);
1161
1162 if (migrate_use_xbzrle()) {
1163 info->xbzrle_cache = g_malloc0(sizeof(*info->xbzrle_cache));
1164 info->xbzrle_cache->cache_size = migrate_xbzrle_cache_size();
1165 info->xbzrle_cache->bytes = xbzrle_counters.bytes;
1166 info->xbzrle_cache->pages = xbzrle_counters.pages;
1167 info->xbzrle_cache->cache_miss = xbzrle_counters.cache_miss;
1168 info->xbzrle_cache->cache_miss_rate = xbzrle_counters.cache_miss_rate;
1169 info->xbzrle_cache->encoding_rate = xbzrle_counters.encoding_rate;
1170 info->xbzrle_cache->overflow = xbzrle_counters.overflow;
1171 }
1172
1173 if (migrate_use_compression()) {
1174 info->compression = g_malloc0(sizeof(*info->compression));
1175 info->compression->pages = compression_counters.pages;
1176 info->compression->busy = compression_counters.busy;
1177 info->compression->busy_rate = compression_counters.busy_rate;
1178 info->compression->compressed_size =
1179 compression_counters.compressed_size;
1180 info->compression->compression_rate =
1181 compression_counters.compression_rate;
1182 }
1183
1184 if (cpu_throttle_active()) {
1185 info->has_cpu_throttle_percentage = true;
1186 info->cpu_throttle_percentage = cpu_throttle_get_percentage();
1187 }
1188
1189 if (s->state != MIGRATION_STATUS_COMPLETED) {
1190 info->ram->remaining = ram_bytes_remaining();
1191 info->ram->dirty_pages_rate = ram_counters.dirty_pages_rate;
1192 }
1193}
1194
1195static void populate_disk_info(MigrationInfo *info)
1196{
1197 if (blk_mig_active()) {
1198 info->disk = g_malloc0(sizeof(*info->disk));
1199 info->disk->transferred = blk_mig_bytes_transferred();
1200 info->disk->remaining = blk_mig_bytes_remaining();
1201 info->disk->total = blk_mig_bytes_total();
1202 }
1203}
1204
1205static void fill_source_migration_info(MigrationInfo *info)
1206{
1207 MigrationState *s = migrate_get_current();
1208 int state = qatomic_read(&s->state);
1209 GSList *cur_blocker = migration_blockers;
1210
1211 info->blocked_reasons = NULL;
1212
1213
1214
1215
1216
1217
1218
1219 qemu_savevm_non_migratable_list(&info->blocked_reasons);
1220
1221 while (cur_blocker) {
1222 QAPI_LIST_PREPEND(info->blocked_reasons,
1223 g_strdup(error_get_pretty(cur_blocker->data)));
1224 cur_blocker = g_slist_next(cur_blocker);
1225 }
1226 info->has_blocked_reasons = info->blocked_reasons != NULL;
1227
1228 switch (state) {
1229 case MIGRATION_STATUS_NONE:
1230
1231
1232 return;
1233 case MIGRATION_STATUS_SETUP:
1234 info->has_status = true;
1235 info->has_total_time = false;
1236 break;
1237 case MIGRATION_STATUS_ACTIVE:
1238 case MIGRATION_STATUS_CANCELLING:
1239 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1240 case MIGRATION_STATUS_PRE_SWITCHOVER:
1241 case MIGRATION_STATUS_DEVICE:
1242 case MIGRATION_STATUS_POSTCOPY_PAUSED:
1243 case MIGRATION_STATUS_POSTCOPY_RECOVER:
1244
1245 populate_time_info(info, s);
1246 populate_ram_info(info, s);
1247 populate_disk_info(info);
1248 populate_vfio_info(info);
1249 break;
1250 case MIGRATION_STATUS_COLO:
1251 info->has_status = true;
1252
1253 break;
1254 case MIGRATION_STATUS_COMPLETED:
1255 populate_time_info(info, s);
1256 populate_ram_info(info, s);
1257 populate_vfio_info(info);
1258 break;
1259 case MIGRATION_STATUS_FAILED:
1260 info->has_status = true;
1261 if (s->error) {
1262 info->error_desc = g_strdup(error_get_pretty(s->error));
1263 }
1264 break;
1265 case MIGRATION_STATUS_CANCELLED:
1266 info->has_status = true;
1267 break;
1268 case MIGRATION_STATUS_WAIT_UNPLUG:
1269 info->has_status = true;
1270 break;
1271 }
1272 info->status = state;
1273}
1274
1275typedef enum WriteTrackingSupport {
1276 WT_SUPPORT_UNKNOWN = 0,
1277 WT_SUPPORT_ABSENT,
1278 WT_SUPPORT_AVAILABLE,
1279 WT_SUPPORT_COMPATIBLE
1280} WriteTrackingSupport;
1281
1282static
1283WriteTrackingSupport migrate_query_write_tracking(void)
1284{
1285
1286 if (!ram_write_tracking_available()) {
1287 return WT_SUPPORT_ABSENT;
1288 }
1289
1290
1291
1292
1293 if (!ram_write_tracking_compatible()) {
1294 return WT_SUPPORT_AVAILABLE;
1295 }
1296
1297 return WT_SUPPORT_COMPATIBLE;
1298}
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
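/*
 * Validate the new capability settings in @params against the current
 * @cap_list (which is updated in place); returns true if the resulting
 * combination is usable, otherwise sets @errp and returns false.
 */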
1309static bool migrate_caps_check(bool *cap_list,
1310 MigrationCapabilityStatusList *params,
1311 Error **errp)
1312{
1313 MigrationCapabilityStatusList *cap;
1314 bool old_postcopy_cap;
1315 MigrationIncomingState *mis = migration_incoming_get_current();
1316
1317 old_postcopy_cap = cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM];
1318
1319 for (cap = params; cap; cap = cap->next) {
1320 cap_list[cap->value->capability] = cap->value->state;
1321 }
1322
1323#ifndef CONFIG_LIVE_BLOCK_MIGRATION
1324 if (cap_list[MIGRATION_CAPABILITY_BLOCK]) {
1325 error_setg(errp, "QEMU compiled without old-style (blk/-b, inc/-i) "
1326 "block migration");
1327 error_append_hint(errp, "Use drive_mirror+NBD instead.\n");
1328 return false;
1329 }
1330#endif
1331
1332#ifndef CONFIG_REPLICATION
1333 if (cap_list[MIGRATION_CAPABILITY_X_COLO]) {
1334 error_setg(errp, "QEMU compiled without replication module"
1335 " can't enable COLO");
1336 error_append_hint(errp, "Please enable replication before COLO.\n");
1337 return false;
1338 }
1339#endif
1340
1341 if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
1342
1343
1344
1345
1346 if (!old_postcopy_cap && runstate_check(RUN_STATE_INMIGRATE) &&
1347 !postcopy_ram_supported_by_host(mis)) {
1348
1349
1350
1351 error_setg(errp, "Postcopy is not supported");
1352 return false;
1353 }
1354
1355 if (cap_list[MIGRATION_CAPABILITY_X_IGNORE_SHARED]) {
1356 error_setg(errp, "Postcopy is not compatible with ignore-shared");
1357 return false;
1358 }
1359 }
1360
1361 if (cap_list[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT]) {
1362 WriteTrackingSupport wt_support;
1363 int idx;
1364
1365
1366
1367
1368 wt_support = migrate_query_write_tracking();
1369 if (wt_support < WT_SUPPORT_AVAILABLE) {
1370 error_setg(errp, "Background-snapshot is not supported by host kernel");
1371 return false;
1372 }
1373 if (wt_support < WT_SUPPORT_COMPATIBLE) {
1374 error_setg(errp, "Background-snapshot is not compatible "
1375 "with guest memory configuration");
1376 return false;
1377 }
1378
1379
1380
1381
1382
1383 for (idx = 0; idx < check_caps_background_snapshot.size; idx++) {
1384 int incomp_cap = check_caps_background_snapshot.caps[idx];
1385 if (cap_list[incomp_cap]) {
1386 error_setg(errp,
1387 "Background-snapshot is not compatible with %s",
1388 MigrationCapability_str(incomp_cap));
1389 return false;
1390 }
1391 }
1392 }
1393
1394#ifdef CONFIG_LINUX
1395 if (cap_list[MIGRATION_CAPABILITY_ZERO_COPY_SEND] &&
1396 (!cap_list[MIGRATION_CAPABILITY_MULTIFD] ||
1397 cap_list[MIGRATION_CAPABILITY_COMPRESS] ||
1398 cap_list[MIGRATION_CAPABILITY_XBZRLE] ||
1399 migrate_multifd_compression() ||
1400 migrate_use_tls())) {
1401 error_setg(errp,
1402 "Zero copy only available for non-compressed non-TLS multifd migration");
1403 return false;
1404 }
1405#else
1406 if (cap_list[MIGRATION_CAPABILITY_ZERO_COPY_SEND]) {
1407 error_setg(errp,
1408 "Zero copy currently only available on Linux");
1409 return false;
1410 }
1411#endif
1412
1413 if (cap_list[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT]) {
1414 if (!cap_list[MIGRATION_CAPABILITY_POSTCOPY_RAM]) {
1415 error_setg(errp, "Postcopy preempt requires postcopy-ram");
1416 return false;
1417 }
1418
1419
1420
1421
1422
1423
1424
1425 if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
1426 error_setg(errp, "Postcopy preempt not compatible with compress");
1427 return false;
1428 }
1429 }
1430
1431 if (cap_list[MIGRATION_CAPABILITY_MULTIFD]) {
1432 if (cap_list[MIGRATION_CAPABILITY_COMPRESS]) {
1433 error_setg(errp, "Multifd is not compatible with compress");
1434 return false;
1435 }
1436 }
1437
1438 return true;
1439}
1440
1441static void fill_destination_migration_info(MigrationInfo *info)
1442{
1443 MigrationIncomingState *mis = migration_incoming_get_current();
1444
1445 if (mis->socket_address_list) {
1446 info->has_socket_address = true;
1447 info->socket_address =
1448 QAPI_CLONE(SocketAddressList, mis->socket_address_list);
1449 }
1450
1451 switch (mis->state) {
1452 case MIGRATION_STATUS_NONE:
1453 return;
1454 case MIGRATION_STATUS_SETUP:
1455 case MIGRATION_STATUS_CANCELLING:
1456 case MIGRATION_STATUS_CANCELLED:
1457 case MIGRATION_STATUS_ACTIVE:
1458 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
1459 case MIGRATION_STATUS_POSTCOPY_PAUSED:
1460 case MIGRATION_STATUS_POSTCOPY_RECOVER:
1461 case MIGRATION_STATUS_FAILED:
1462 case MIGRATION_STATUS_COLO:
1463 info->has_status = true;
1464 break;
1465 case MIGRATION_STATUS_COMPLETED:
1466 info->has_status = true;
1467 fill_destination_postcopy_migration_info(info);
1468 break;
1469 }
1470 info->status = mis->state;
1471}
1472
1473MigrationInfo *qmp_query_migrate(Error **errp)
1474{
1475 MigrationInfo *info = g_malloc0(sizeof(*info));
1476
1477 fill_destination_migration_info(info);
1478 fill_source_migration_info(info);
1479
1480 return info;
1481}
1482
1483void qmp_migrate_set_capabilities(MigrationCapabilityStatusList *params,
1484 Error **errp)
1485{
1486 MigrationState *s = migrate_get_current();
1487 MigrationCapabilityStatusList *cap;
1488 bool cap_list[MIGRATION_CAPABILITY__MAX];
1489
1490 if (migration_is_running(s->state)) {
1491 error_setg(errp, QERR_MIGRATION_ACTIVE);
1492 return;
1493 }
1494
1495 memcpy(cap_list, s->enabled_capabilities, sizeof(cap_list));
1496 if (!migrate_caps_check(cap_list, params, errp)) {
1497 return;
1498 }
1499
1500 for (cap = params; cap; cap = cap->next) {
1501 s->enabled_capabilities[cap->value->capability] = cap->value->state;
1502 }
1503}
1504
1505
1506
1507
1508
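/*
 * Check whether the parameters are valid. Error will be put into errp
 * (if provided). Return true if valid, otherwise false.
 */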
1509static bool migrate_params_check(MigrationParameters *params, Error **errp)
1510{
1511 if (params->has_compress_level &&
1512 (params->compress_level > 9)) {
1513 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "compress_level",
1514 "a value between 0 and 9");
1515 return false;
1516 }
1517
1518 if (params->has_compress_threads && (params->compress_threads < 1)) {
1519 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1520 "compress_threads",
1521 "a value between 1 and 255");
1522 return false;
1523 }
1524
1525 if (params->has_decompress_threads && (params->decompress_threads < 1)) {
1526 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1527 "decompress_threads",
1528 "a value between 1 and 255");
1529 return false;
1530 }
1531
1532 if (params->has_throttle_trigger_threshold &&
1533 (params->throttle_trigger_threshold < 1 ||
1534 params->throttle_trigger_threshold > 100)) {
1535 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1536 "throttle_trigger_threshold",
1537 "an integer in the range of 1 to 100");
1538 return false;
1539 }
1540
1541 if (params->has_cpu_throttle_initial &&
1542 (params->cpu_throttle_initial < 1 ||
1543 params->cpu_throttle_initial > 99)) {
1544 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1545 "cpu_throttle_initial",
1546 "an integer in the range of 1 to 99");
1547 return false;
1548 }
1549
1550 if (params->has_cpu_throttle_increment &&
1551 (params->cpu_throttle_increment < 1 ||
1552 params->cpu_throttle_increment > 99)) {
1553 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1554 "cpu_throttle_increment",
1555 "an integer in the range of 1 to 99");
1556 return false;
1557 }
1558
1559 if (params->has_max_bandwidth && (params->max_bandwidth > SIZE_MAX)) {
1560 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1561 "max_bandwidth",
1562 "an integer in the range of 0 to "stringify(SIZE_MAX)
1563 " bytes/second");
1564 return false;
1565 }
1566
1567 if (params->has_downtime_limit &&
1568 (params->downtime_limit > MAX_MIGRATE_DOWNTIME)) {
1569 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1570 "downtime_limit",
1571 "an integer in the range of 0 to "
1572 stringify(MAX_MIGRATE_DOWNTIME)" ms");
1573 return false;
1574 }
1575
1576
1577
1578 if (params->has_multifd_channels && (params->multifd_channels < 1)) {
1579 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1580 "multifd_channels",
1581 "a value between 1 and 255");
1582 return false;
1583 }
1584
1585 if (params->has_multifd_zlib_level &&
1586 (params->multifd_zlib_level > 9)) {
1587 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zlib_level",
1588 "a value between 0 and 9");
1589 return false;
1590 }
1591
1592 if (params->has_multifd_zstd_level &&
1593 (params->multifd_zstd_level > 20)) {
1594 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "multifd_zstd_level",
1595 "a value between 0 and 20");
1596 return false;
1597 }
1598
1599 if (params->has_xbzrle_cache_size &&
1600 (params->xbzrle_cache_size < qemu_target_page_size() ||
1601 !is_power_of_2(params->xbzrle_cache_size))) {
1602 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1603 "xbzrle_cache_size",
1604 "a power of two no less than the target page size");
1605 return false;
1606 }
1607
1608 if (params->has_max_cpu_throttle &&
1609 (params->max_cpu_throttle < params->cpu_throttle_initial ||
1610 params->max_cpu_throttle > 99)) {
1611 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1612 "max_cpu_throttle",
1613 "an integer in the range of cpu_throttle_initial to 99");
1614 return false;
1615 }
1616
1617 if (params->has_announce_initial &&
1618 params->announce_initial > 100000) {
1619 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1620 "announce_initial",
1621 "a value between 0 and 100000");
1622 return false;
1623 }
1624 if (params->has_announce_max &&
1625 params->announce_max > 100000) {
1626 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1627 "announce_max",
1628 "a value between 0 and 100000");
1629 return false;
1630 }
1631 if (params->has_announce_rounds &&
1632 params->announce_rounds > 1000) {
1633 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1634 "announce_rounds",
1635 "a value between 0 and 1000");
1636 return false;
1637 }
1638 if (params->has_announce_step &&
1639 (params->announce_step < 1 ||
1640 params->announce_step > 10000)) {
1641 error_setg(errp, QERR_INVALID_PARAMETER_VALUE,
1642 "announce_step",
1643 "a value between 0 and 10000");
1644 return false;
1645 }
1646
1647 if (params->has_block_bitmap_mapping &&
1648 !check_dirty_bitmap_mig_alias_map(params->block_bitmap_mapping, errp)) {
1649 error_prepend(errp, "Invalid mapping given for block-bitmap-mapping: ");
1650 return false;
1651 }
1652
1653#ifdef CONFIG_LINUX
1654 if (migrate_use_zero_copy_send() &&
1655 ((params->has_multifd_compression && params->multifd_compression) ||
1656 (params->tls_creds && *params->tls_creds))) {
1657 error_setg(errp,
1658 "Zero copy only available for non-compressed non-TLS multifd migration");
1659 return false;
1660 }
1661#endif
1662
1663 return true;
1664}
1665
1666static void migrate_params_test_apply(MigrateSetParameters *params,
1667 MigrationParameters *dest)
1668{
1669 *dest = migrate_get_current()->parameters;
1670
1671
1672
1673 if (params->has_compress_level) {
1674 dest->compress_level = params->compress_level;
1675 }
1676
1677 if (params->has_compress_threads) {
1678 dest->compress_threads = params->compress_threads;
1679 }
1680
1681 if (params->has_compress_wait_thread) {
1682 dest->compress_wait_thread = params->compress_wait_thread;
1683 }
1684
1685 if (params->has_decompress_threads) {
1686 dest->decompress_threads = params->decompress_threads;
1687 }
1688
1689 if (params->has_throttle_trigger_threshold) {
1690 dest->throttle_trigger_threshold = params->throttle_trigger_threshold;
1691 }
1692
1693 if (params->has_cpu_throttle_initial) {
1694 dest->cpu_throttle_initial = params->cpu_throttle_initial;
1695 }
1696
1697 if (params->has_cpu_throttle_increment) {
1698 dest->cpu_throttle_increment = params->cpu_throttle_increment;
1699 }
1700
1701 if (params->has_cpu_throttle_tailslow) {
1702 dest->cpu_throttle_tailslow = params->cpu_throttle_tailslow;
1703 }
1704
1705 if (params->tls_creds) {
1706 assert(params->tls_creds->type == QTYPE_QSTRING);
1707 dest->tls_creds = params->tls_creds->u.s;
1708 }
1709
1710 if (params->tls_hostname) {
1711 assert(params->tls_hostname->type == QTYPE_QSTRING);
1712 dest->tls_hostname = params->tls_hostname->u.s;
1713 }
1714
1715 if (params->has_max_bandwidth) {
1716 dest->max_bandwidth = params->max_bandwidth;
1717 }
1718
1719 if (params->has_downtime_limit) {
1720 dest->downtime_limit = params->downtime_limit;
1721 }
1722
1723 if (params->has_x_checkpoint_delay) {
1724 dest->x_checkpoint_delay = params->x_checkpoint_delay;
1725 }
1726
1727 if (params->has_block_incremental) {
1728 dest->block_incremental = params->block_incremental;
1729 }
1730 if (params->has_multifd_channels) {
1731 dest->multifd_channels = params->multifd_channels;
1732 }
1733 if (params->has_multifd_compression) {
1734 dest->multifd_compression = params->multifd_compression;
1735 }
1736 if (params->has_xbzrle_cache_size) {
1737 dest->xbzrle_cache_size = params->xbzrle_cache_size;
1738 }
1739 if (params->has_max_postcopy_bandwidth) {
1740 dest->max_postcopy_bandwidth = params->max_postcopy_bandwidth;
1741 }
1742 if (params->has_max_cpu_throttle) {
1743 dest->max_cpu_throttle = params->max_cpu_throttle;
1744 }
1745 if (params->has_announce_initial) {
1746 dest->announce_initial = params->announce_initial;
1747 }
1748 if (params->has_announce_max) {
1749 dest->announce_max = params->announce_max;
1750 }
1751 if (params->has_announce_rounds) {
1752 dest->announce_rounds = params->announce_rounds;
1753 }
1754 if (params->has_announce_step) {
1755 dest->announce_step = params->announce_step;
1756 }
1757
1758 if (params->has_block_bitmap_mapping) {
1759 dest->has_block_bitmap_mapping = true;
1760 dest->block_bitmap_mapping = params->block_bitmap_mapping;
1761 }
1762}
1763
1764static void migrate_params_apply(MigrateSetParameters *params, Error **errp)
1765{
1766 MigrationState *s = migrate_get_current();
1767
1768
1769
1770 if (params->has_compress_level) {
1771 s->parameters.compress_level = params->compress_level;
1772 }
1773
1774 if (params->has_compress_threads) {
1775 s->parameters.compress_threads = params->compress_threads;
1776 }
1777
1778 if (params->has_compress_wait_thread) {
1779 s->parameters.compress_wait_thread = params->compress_wait_thread;
1780 }
1781
1782 if (params->has_decompress_threads) {
1783 s->parameters.decompress_threads = params->decompress_threads;
1784 }
1785
1786 if (params->has_throttle_trigger_threshold) {
1787 s->parameters.throttle_trigger_threshold = params->throttle_trigger_threshold;
1788 }
1789
1790 if (params->has_cpu_throttle_initial) {
1791 s->parameters.cpu_throttle_initial = params->cpu_throttle_initial;
1792 }
1793
1794 if (params->has_cpu_throttle_increment) {
1795 s->parameters.cpu_throttle_increment = params->cpu_throttle_increment;
1796 }
1797
1798 if (params->has_cpu_throttle_tailslow) {
1799 s->parameters.cpu_throttle_tailslow = params->cpu_throttle_tailslow;
1800 }
1801
1802 if (params->tls_creds) {
1803 g_free(s->parameters.tls_creds);
1804 assert(params->tls_creds->type == QTYPE_QSTRING);
1805 s->parameters.tls_creds = g_strdup(params->tls_creds->u.s);
1806 }
1807
1808 if (params->tls_hostname) {
1809 g_free(s->parameters.tls_hostname);
1810 assert(params->tls_hostname->type == QTYPE_QSTRING);
1811 s->parameters.tls_hostname = g_strdup(params->tls_hostname->u.s);
1812 }
1813
1814 if (params->tls_authz) {
1815 g_free(s->parameters.tls_authz);
1816 assert(params->tls_authz->type == QTYPE_QSTRING);
1817 s->parameters.tls_authz = g_strdup(params->tls_authz->u.s);
1818 }
1819
1820 if (params->has_max_bandwidth) {
1821 s->parameters.max_bandwidth = params->max_bandwidth;
1822 if (s->to_dst_file && !migration_in_postcopy()) {
1823 qemu_file_set_rate_limit(s->to_dst_file,
1824 s->parameters.max_bandwidth / XFER_LIMIT_RATIO);
1825 }
1826 }
1827
1828 if (params->has_downtime_limit) {
1829 s->parameters.downtime_limit = params->downtime_limit;
1830 }
1831
1832 if (params->has_x_checkpoint_delay) {
1833 s->parameters.x_checkpoint_delay = params->x_checkpoint_delay;
1834 if (migration_in_colo_state()) {
1835 colo_checkpoint_notify(s);
1836 }
1837 }
1838
1839 if (params->has_block_incremental) {
1840 s->parameters.block_incremental = params->block_incremental;
1841 }
1842 if (params->has_multifd_channels) {
1843 s->parameters.multifd_channels = params->multifd_channels;
1844 }
1845 if (params->has_multifd_compression) {
1846 s->parameters.multifd_compression = params->multifd_compression;
1847 }
1848 if (params->has_xbzrle_cache_size) {
1849 s->parameters.xbzrle_cache_size = params->xbzrle_cache_size;
1850 xbzrle_cache_resize(params->xbzrle_cache_size, errp);
1851 }
1852 if (params->has_max_postcopy_bandwidth) {
1853 s->parameters.max_postcopy_bandwidth = params->max_postcopy_bandwidth;
1854 if (s->to_dst_file && migration_in_postcopy()) {
1855 qemu_file_set_rate_limit(s->to_dst_file,
1856 s->parameters.max_postcopy_bandwidth / XFER_LIMIT_RATIO);
1857 }
1858 }
1859 if (params->has_max_cpu_throttle) {
1860 s->parameters.max_cpu_throttle = params->max_cpu_throttle;
1861 }
1862 if (params->has_announce_initial) {
1863 s->parameters.announce_initial = params->announce_initial;
1864 }
1865 if (params->has_announce_max) {
1866 s->parameters.announce_max = params->announce_max;
1867 }
1868 if (params->has_announce_rounds) {
1869 s->parameters.announce_rounds = params->announce_rounds;
1870 }
1871 if (params->has_announce_step) {
1872 s->parameters.announce_step = params->announce_step;
1873 }
1874
1875 if (params->has_block_bitmap_mapping) {
1876 qapi_free_BitmapMigrationNodeAliasList(
1877 s->parameters.block_bitmap_mapping);
1878
1879 s->parameters.has_block_bitmap_mapping = true;
1880 s->parameters.block_bitmap_mapping =
1881 QAPI_CLONE(BitmapMigrationNodeAliasList,
1882 params->block_bitmap_mapping);
1883 }
1884}
1885
1886void qmp_migrate_set_parameters(MigrateSetParameters *params, Error **errp)
1887{
1888 MigrationParameters tmp;
1889
1890
1891 if (params->tls_creds
1892 && params->tls_creds->type == QTYPE_QNULL) {
1893 qobject_unref(params->tls_creds->u.n);
1894 params->tls_creds->type = QTYPE_QSTRING;
1895 params->tls_creds->u.s = strdup("");
1896 }
1897
1898 if (params->tls_hostname
1899 && params->tls_hostname->type == QTYPE_QNULL) {
1900 qobject_unref(params->tls_hostname->u.n);
1901 params->tls_hostname->type = QTYPE_QSTRING;
1902 params->tls_hostname->u.s = strdup("");
1903 }
1904
1905 migrate_params_test_apply(params, &tmp);
1906
1907 if (!migrate_params_check(&tmp, errp)) {
1908
1909 return;
1910 }
1911
1912 migrate_params_apply(params, errp);
1913}
1914
1915
1916void qmp_migrate_start_postcopy(Error **errp)
1917{
1918 MigrationState *s = migrate_get_current();
1919
1920 if (!migrate_postcopy()) {
1921 error_setg(errp, "Enable postcopy with migrate_set_capability before"
1922 " the start of migration");
1923 return;
1924 }
1925
1926 if (s->state == MIGRATION_STATUS_NONE) {
1927 error_setg(errp, "Postcopy must be started after migration has been"
1928 " started");
1929 return;
1930 }
1931
1932
1933
1934
1935 qatomic_set(&s->start_postcopy, true);
1936}
1937
1938
1939
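/*
 * Atomically transition *state from old_state to new_state; on success,
 * trace the change and (when the events capability is on) emit a QMP event.
 */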
1940void migrate_set_state(int *state, int old_state, int new_state)
1941{
1942 assert(new_state < MIGRATION_STATUS__MAX);
1943 if (qatomic_cmpxchg(state, old_state, new_state) == old_state) {
1944 trace_migrate_set_state(MigrationStatus_str(new_state));
1945 migrate_generate_event(new_state);
1946 }
1947}
1948
1949static MigrationCapabilityStatus *migrate_cap_add(MigrationCapability index,
1950 bool state)
1951{
1952 MigrationCapabilityStatus *cap;
1953
1954 cap = g_new0(MigrationCapabilityStatus, 1);
1955 cap->capability = index;
1956 cap->state = state;
1957
1958 return cap;
1959}
1960
1961void migrate_set_block_enabled(bool value, Error **errp)
1962{
1963 MigrationCapabilityStatusList *cap = NULL;
1964
1965 QAPI_LIST_PREPEND(cap, migrate_cap_add(MIGRATION_CAPABILITY_BLOCK, value));
1966 qmp_migrate_set_capabilities(cap, errp);
1967 qapi_free_MigrationCapabilityStatusList(cap);
1968}
1969
1970static void migrate_set_block_incremental(MigrationState *s, bool value)
1971{
1972 s->parameters.block_incremental = value;
1973}
1974
1975static void block_cleanup_parameters(MigrationState *s)
1976{
1977 if (s->must_remove_block_options) {
1978
1979 migrate_set_block_enabled(false, &error_abort);
1980 migrate_set_block_incremental(s, false);
1981 s->must_remove_block_options = false;
1982 }
1983}
1984
1985static void migrate_fd_cleanup(MigrationState *s)
1986{
1987 qemu_bh_delete(s->cleanup_bh);
1988 s->cleanup_bh = NULL;
1989
1990 g_free(s->hostname);
1991 s->hostname = NULL;
1992 json_writer_free(s->vmdesc);
1993 s->vmdesc = NULL;
1994
1995 qemu_savevm_state_cleanup();
1996
1997 if (s->to_dst_file) {
1998 QEMUFile *tmp;
1999
2000 trace_migrate_fd_cleanup();
2001 qemu_mutex_unlock_iothread();
2002 if (s->migration_thread_running) {
2003 qemu_thread_join(&s->thread);
2004 s->migration_thread_running = false;
2005 }
2006 qemu_mutex_lock_iothread();
2007
2008 multifd_save_cleanup();
2009 qemu_mutex_lock(&s->qemu_file_lock);
2010 tmp = s->to_dst_file;
2011 s->to_dst_file = NULL;
2012 qemu_mutex_unlock(&s->qemu_file_lock);
2013
2014
2015
2016
2017 migration_ioc_unregister_yank_from_file(tmp);
2018 qemu_fclose(tmp);
2019 }
2020
2021 if (s->postcopy_qemufile_src) {
2022 migration_ioc_unregister_yank_from_file(s->postcopy_qemufile_src);
2023 qemu_fclose(s->postcopy_qemufile_src);
2024 s->postcopy_qemufile_src = NULL;
2025 }
2026
2027 assert(!migration_is_active(s));
2028
2029 if (s->state == MIGRATION_STATUS_CANCELLING) {
2030 migrate_set_state(&s->state, MIGRATION_STATUS_CANCELLING,
2031 MIGRATION_STATUS_CANCELLED);
2032 }
2033
2034 if (s->error) {
2035
2036 error_report_err(error_copy(s->error));
2037 }
2038 notifier_list_notify(&migration_state_notifiers, s);
2039 block_cleanup_parameters(s);
2040 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2041}
2042
2043static void migrate_fd_cleanup_schedule(MigrationState *s)
2044{
2045
2046
2047
2048
2049 object_ref(OBJECT(s));
2050 qemu_bh_schedule(s->cleanup_bh);
2051}
2052
2053static void migrate_fd_cleanup_bh(void *opaque)
2054{
2055 MigrationState *s = opaque;
2056 migrate_fd_cleanup(s);
2057 object_unref(OBJECT(s));
2058}
2059
2060void migrate_set_error(MigrationState *s, const Error *error)
2061{
2062 QEMU_LOCK_GUARD(&s->error_mutex);
2063 if (!s->error) {
2064 s->error = error_copy(error);
2065 }
2066}
2067
2068static void migrate_error_free(MigrationState *s)
2069{
2070 QEMU_LOCK_GUARD(&s->error_mutex);
2071 if (s->error) {
2072 error_free(s->error);
2073 s->error = NULL;
2074 }
2075}
2076
2077void migrate_fd_error(MigrationState *s, const Error *error)
2078{
2079 trace_migrate_fd_error(error_get_pretty(error));
2080 assert(s->to_dst_file == NULL);
2081 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2082 MIGRATION_STATUS_FAILED);
2083 migrate_set_error(s, error);
2084}
2085
2086static void migrate_fd_cancel(MigrationState *s)
2087{
2088 int old_state ;
2089 QEMUFile *f = migrate_get_current()->to_dst_file;
2090 trace_migrate_fd_cancel();
2091
2092 WITH_QEMU_LOCK_GUARD(&s->qemu_file_lock) {
2093 if (s->rp_state.from_dst_file) {
2094
2095 qemu_file_shutdown(s->rp_state.from_dst_file);
2096 }
2097 }
2098
2099 do {
2100 old_state = s->state;
2101 if (!migration_is_running(old_state)) {
2102 break;
2103 }
2104
2105 if (old_state == MIGRATION_STATUS_PRE_SWITCHOVER) {
2106 qemu_sem_post(&s->pause_sem);
2107 }
2108 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_CANCELLING);
2109 } while (s->state != MIGRATION_STATUS_CANCELLING);
2110
2111
2112
2113
2114
2115
2116
2117
2118 if (s->state == MIGRATION_STATUS_CANCELLING && f) {
2119 qemu_file_shutdown(f);
2120 }
2121 if (s->state == MIGRATION_STATUS_CANCELLING && s->block_inactive) {
2122 Error *local_err = NULL;
2123
2124 bdrv_activate_all(&local_err);
2125 if (local_err) {
2126 error_report_err(local_err);
2127 } else {
2128 s->block_inactive = false;
2129 }
2130 }
2131}
2132
2133void add_migration_state_change_notifier(Notifier *notify)
2134{
2135 notifier_list_add(&migration_state_notifiers, notify);
2136}
2137
2138void remove_migration_state_change_notifier(Notifier *notify)
2139{
2140 notifier_remove(notify);
2141}
2142
2143bool migration_in_setup(MigrationState *s)
2144{
2145 return s->state == MIGRATION_STATUS_SETUP;
2146}
2147
2148bool migration_has_finished(MigrationState *s)
2149{
2150 return s->state == MIGRATION_STATUS_COMPLETED;
2151}
2152
2153bool migration_has_failed(MigrationState *s)
2154{
2155 return (s->state == MIGRATION_STATUS_CANCELLED ||
2156 s->state == MIGRATION_STATUS_FAILED);
2157}
2158
2159bool migration_in_postcopy(void)
2160{
2161 MigrationState *s = migrate_get_current();
2162
2163 switch (s->state) {
2164 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
2165 case MIGRATION_STATUS_POSTCOPY_PAUSED:
2166 case MIGRATION_STATUS_POSTCOPY_RECOVER:
2167 return true;
2168 default:
2169 return false;
2170 }
2171}
2172
2173bool migration_in_postcopy_after_devices(MigrationState *s)
2174{
2175 return migration_in_postcopy() && s->postcopy_after_devices;
2176}
2177
2178bool migration_in_incoming_postcopy(void)
2179{
2180 PostcopyState ps = postcopy_state_get();
2181
2182 return ps >= POSTCOPY_INCOMING_DISCARD && ps < POSTCOPY_INCOMING_END;
2183}
2184
2185bool migration_incoming_postcopy_advised(void)
2186{
2187 PostcopyState ps = postcopy_state_get();
2188
2189 return ps >= POSTCOPY_INCOMING_ADVISE && ps < POSTCOPY_INCOMING_END;
2190}
2191
2192bool migration_in_bg_snapshot(void)
2193{
2194 MigrationState *s = migrate_get_current();
2195
2196 return migrate_background_snapshot() &&
2197 migration_is_setup_or_active(s->state);
2198}
2199
2200bool migration_is_idle(void)
2201{
2202 MigrationState *s = current_migration;
2203
2204 if (!s) {
2205 return true;
2206 }
2207
2208 switch (s->state) {
2209 case MIGRATION_STATUS_NONE:
2210 case MIGRATION_STATUS_CANCELLED:
2211 case MIGRATION_STATUS_COMPLETED:
2212 case MIGRATION_STATUS_FAILED:
2213 return true;
2214 case MIGRATION_STATUS_SETUP:
2215 case MIGRATION_STATUS_CANCELLING:
2216 case MIGRATION_STATUS_ACTIVE:
2217 case MIGRATION_STATUS_POSTCOPY_ACTIVE:
2218 case MIGRATION_STATUS_COLO:
2219 case MIGRATION_STATUS_PRE_SWITCHOVER:
2220 case MIGRATION_STATUS_DEVICE:
2221 case MIGRATION_STATUS_WAIT_UNPLUG:
2222 return false;
2223 case MIGRATION_STATUS__MAX:
2224 g_assert_not_reached();
2225 }
2226
2227 return false;
2228}
2229
2230bool migration_is_active(MigrationState *s)
2231{
2232 return (s->state == MIGRATION_STATUS_ACTIVE ||
2233 s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
2234}
2235
2236void migrate_init(MigrationState *s)
2237{
2238
2239
2240
2241
2242
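    /*
     * Reinitialise all migration state, except the parameters and
     * capabilities the user has set, and the locks/semaphores, which must
     * survive across migrations.
     */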
2243 s->cleanup_bh = 0;
2244 s->vm_start_bh = 0;
2245 s->to_dst_file = NULL;
2246 s->state = MIGRATION_STATUS_NONE;
2247 s->rp_state.from_dst_file = NULL;
2248 s->rp_state.error = false;
2249 s->mbps = 0.0;
2250 s->pages_per_second = 0.0;
2251 s->downtime = 0;
2252 s->expected_downtime = 0;
2253 s->setup_time = 0;
2254 s->start_postcopy = false;
2255 s->postcopy_after_devices = false;
2256 s->migration_thread_running = false;
2257 error_free(s->error);
2258 s->error = NULL;
2259 s->hostname = NULL;
2260
2261 migrate_set_state(&s->state, MIGRATION_STATUS_NONE, MIGRATION_STATUS_SETUP);
2262
2263 s->start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
2264 s->total_time = 0;
2265 s->vm_was_running = false;
2266 s->iteration_initial_bytes = 0;
2267 s->threshold_size = 0;
2268}
2269
2270int migrate_add_blocker_internal(Error *reason, Error **errp)
2271{
2272
2273 if (runstate_check(RUN_STATE_SAVE_VM) || !migration_is_idle()) {
2274 error_propagate_prepend(errp, error_copy(reason),
2275 "disallowing migration blocker "
2276 "(migration/snapshot in progress) for: ");
2277 return -EBUSY;
2278 }
2279
2280 migration_blockers = g_slist_prepend(migration_blockers, reason);
2281 return 0;
2282}
2283
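/*
 * Register @reason as a migration blocker.  A typical caller keeps the Error
 * around so the blocker can be dropped again later (sketch only, names below
 * are illustrative):
 *
 *     Error *blocker = NULL;
 *     error_setg(&blocker, "device does not support migration");
 *     if (migrate_add_blocker(blocker, errp) < 0) {
 *         error_free(blocker);
 *         return;
 *     }
 *     ...
 *     migrate_del_blocker(blocker);
 *     error_free(blocker);
 */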
2284int migrate_add_blocker(Error *reason, Error **errp)
2285{
2286 if (only_migratable) {
2287 error_propagate_prepend(errp, error_copy(reason),
2288 "disallowing migration blocker "
2289 "(--only-migratable) for: ");
2290 return -EACCES;
2291 }
2292
2293 return migrate_add_blocker_internal(reason, errp);
2294}
2295
2296void migrate_del_blocker(Error *reason)
2297{
2298 migration_blockers = g_slist_remove(migration_blockers, reason);
2299}
2300
2301void qmp_migrate_incoming(const char *uri, Error **errp)
2302{
2303 Error *local_err = NULL;
2304 static bool once = true;
2305
2306 if (!once) {
2307 error_setg(errp, "The incoming migration has already been started");
2308 return;
2309 }
2310 if (!runstate_check(RUN_STATE_INMIGRATE)) {
2311 error_setg(errp, "'-incoming' was not specified on the command line");
2312 return;
2313 }
2314
2315 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
2316 return;
2317 }
2318
2319 qemu_start_incoming_migration(uri, &local_err);
2320
2321 if (local_err) {
2322 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2323 error_propagate(errp, local_err);
2324 return;
2325 }
2326
2327 once = false;
2328}
2329
2330void qmp_migrate_recover(const char *uri, Error **errp)
2331{
2332 MigrationIncomingState *mis = migration_incoming_get_current();
2333
2334
2335
2336
2337
2338
2339 assert(errp);
2340
2341 if (mis->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
2342 error_setg(errp, "Migrate recover can only be run "
2343 "when postcopy is paused.");
2344 return;
2345 }
2346
2347
2348 migration_incoming_transport_cleanup(mis);
2349
2350
2351
2352
2353
2354
2355 qemu_start_incoming_migration(uri, errp);
2356}
2357
2358void qmp_migrate_pause(Error **errp)
2359{
2360 MigrationState *ms = migrate_get_current();
2361 MigrationIncomingState *mis = migration_incoming_get_current();
2362 int ret;
2363
2364 if (ms->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2365
2366 qemu_mutex_lock(&ms->qemu_file_lock);
2367 ret = qemu_file_shutdown(ms->to_dst_file);
2368 qemu_mutex_unlock(&ms->qemu_file_lock);
2369 if (ret) {
2370 error_setg(errp, "Failed to pause source migration");
2371 }
2372 return;
2373 }
2374
2375 if (mis->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
2376 ret = qemu_file_shutdown(mis->from_src_file);
2377 if (ret) {
2378 error_setg(errp, "Failed to pause destination migration");
2379 }
2380 return;
2381 }
2382
2383 error_setg(errp, "migrate-pause is currently only supported "
2384 "during postcopy-active state");
2385}
2386
2387bool migration_is_blocked(Error **errp)
2388{
2389 if (qemu_savevm_state_blocked(errp)) {
2390 return true;
2391 }
2392
2393 if (migration_blockers) {
2394 error_propagate(errp, error_copy(migration_blockers->data));
2395 return true;
2396 }
2397
2398 return false;
2399}
2400
2401
2402static bool migrate_prepare(MigrationState *s, bool blk, bool blk_inc,
2403 bool resume, Error **errp)
2404{
2405 Error *local_err = NULL;
2406
2407 if (resume) {
2408 if (s->state != MIGRATION_STATUS_POSTCOPY_PAUSED) {
2409 error_setg(errp, "Cannot resume if there is no "
2410 "paused migration");
2411 return false;
2412 }
2413
2414
2415
2416
2417
2418
2419
2420
2421
2422
2423
2424
2425
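        /*
         * Postcopy recovery does not work together with release-ram:
         * release-ram frees source page buffers as soon as they are queued,
         * so after a network failure any pages that never reached the
         * destination would be lost.  Refuse to resume in that case.
         */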
2426 if (migrate_release_ram()) {
2427 error_setg(errp, "Postcopy recovery cannot work "
2428 "when release-ram capability is set");
2429 return false;
2430 }
2431
2432
2433 return true;
2434 }
2435
2436 if (migration_is_running(s->state)) {
2437 error_setg(errp, QERR_MIGRATION_ACTIVE);
2438 return false;
2439 }
2440
2441 if (runstate_check(RUN_STATE_INMIGRATE)) {
2442 error_setg(errp, "Guest is waiting for an incoming migration");
2443 return false;
2444 }
2445
2446 if (runstate_check(RUN_STATE_POSTMIGRATE)) {
2447 error_setg(errp, "Can't migrate the vm that was paused due to "
2448 "previous migration");
2449 return false;
2450 }
2451
2452 if (migration_is_blocked(errp)) {
2453 return false;
2454 }
2455
2456 if (blk || blk_inc) {
2457 if (migrate_colo_enabled()) {
2458 error_setg(errp, "No disk migration is required in COLO mode");
2459 return false;
2460 }
2461 if (migrate_use_block() || migrate_use_block_incremental()) {
2462 error_setg(errp, "Command options are incompatible with "
2463 "current migration capabilities");
2464 return false;
2465 }
2466 migrate_set_block_enabled(true, &local_err);
2467 if (local_err) {
2468 error_propagate(errp, local_err);
2469 return false;
2470 }
2471 s->must_remove_block_options = true;
2472 }
2473
2474 if (blk_inc) {
2475 migrate_set_block_incremental(s, true);
2476 }
2477
2478 migrate_init(s);
2479
2480
2481
2482
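    /* Start the RAM and compression counters from zero for this attempt */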
2483 memset(&ram_counters, 0, sizeof(ram_counters));
2484 memset(&compression_counters, 0, sizeof(compression_counters));
2485
2486 return true;
2487}
2488
2489void qmp_migrate(const char *uri, bool has_blk, bool blk,
2490 bool has_inc, bool inc, bool has_detach, bool detach,
2491 bool has_resume, bool resume, Error **errp)
2492{
2493 Error *local_err = NULL;
2494 MigrationState *s = migrate_get_current();
2495 const char *p = NULL;
2496
2497
2498 if (!migration_channels_and_uri_compatible(uri, errp)) {
2499 return;
2500 }
2501
2502 if (!migrate_prepare(s, has_blk && blk, has_inc && inc,
2503 has_resume && resume, errp)) {
2504
2505 return;
2506 }
2507
2508 if (!(has_resume && resume)) {
2509 if (!yank_register_instance(MIGRATION_YANK_INSTANCE, errp)) {
2510 return;
2511 }
2512 }
2513
2514 if (strstart(uri, "tcp:", &p) ||
2515 strstart(uri, "unix:", NULL) ||
2516 strstart(uri, "vsock:", NULL)) {
2517 socket_start_outgoing_migration(s, p ? p : uri, &local_err);
2518#ifdef CONFIG_RDMA
2519 } else if (strstart(uri, "rdma:", &p)) {
2520 rdma_start_outgoing_migration(s, p, &local_err);
2521#endif
2522 } else if (strstart(uri, "exec:", &p)) {
2523 exec_start_outgoing_migration(s, p, &local_err);
2524 } else if (strstart(uri, "fd:", &p)) {
2525 fd_start_outgoing_migration(s, p, &local_err);
2526 } else {
2527 if (!(has_resume && resume)) {
2528 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2529 }
2530 error_setg(errp, QERR_INVALID_PARAMETER_VALUE, "uri",
2531 "a valid migration protocol");
2532 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
2533 MIGRATION_STATUS_FAILED);
2534 block_cleanup_parameters(s);
2535 return;
2536 }
2537
2538 if (local_err) {
2539 if (!(has_resume && resume)) {
2540 yank_unregister_instance(MIGRATION_YANK_INSTANCE);
2541 }
2542 migrate_fd_error(s, local_err);
2543 error_propagate(errp, local_err);
2544 return;
2545 }
2546}
2547
2548void qmp_migrate_cancel(Error **errp)
2549{
2550 migration_cancel(NULL);
2551}
2552
2553void qmp_migrate_continue(MigrationStatus state, Error **errp)
2554{
2555 MigrationState *s = migrate_get_current();
2556 if (s->state != state) {
2557 error_setg(errp, "Migration not in expected state: %s",
2558 MigrationStatus_str(s->state));
2559 return;
2560 }
2561 qemu_sem_post(&s->pause_sem);
2562}
2563
2564bool migrate_release_ram(void)
2565{
2566 MigrationState *s;
2567
2568 s = migrate_get_current();
2569
2570 return s->enabled_capabilities[MIGRATION_CAPABILITY_RELEASE_RAM];
2571}
2572
2573bool migrate_postcopy_ram(void)
2574{
2575 MigrationState *s;
2576
2577 s = migrate_get_current();
2578
2579 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_RAM];
2580}
2581
2582bool migrate_postcopy(void)
2583{
2584 return migrate_postcopy_ram() || migrate_dirty_bitmaps();
2585}
2586
2587bool migrate_auto_converge(void)
2588{
2589 MigrationState *s;
2590
2591 s = migrate_get_current();
2592
2593 return s->enabled_capabilities[MIGRATION_CAPABILITY_AUTO_CONVERGE];
2594}
2595
2596bool migrate_zero_blocks(void)
2597{
2598 MigrationState *s;
2599
2600 s = migrate_get_current();
2601
2602 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_BLOCKS];
2603}
2604
2605bool migrate_postcopy_blocktime(void)
2606{
2607 MigrationState *s;
2608
2609 s = migrate_get_current();
2610
2611 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_BLOCKTIME];
2612}
2613
2614bool migrate_use_compression(void)
2615{
2616 MigrationState *s;
2617
2618 s = migrate_get_current();
2619
2620 return s->enabled_capabilities[MIGRATION_CAPABILITY_COMPRESS];
2621}
2622
2623int migrate_compress_level(void)
2624{
2625 MigrationState *s;
2626
2627 s = migrate_get_current();
2628
2629 return s->parameters.compress_level;
2630}
2631
2632int migrate_compress_threads(void)
2633{
2634 MigrationState *s;
2635
2636 s = migrate_get_current();
2637
2638 return s->parameters.compress_threads;
2639}
2640
2641int migrate_compress_wait_thread(void)
2642{
2643 MigrationState *s;
2644
2645 s = migrate_get_current();
2646
2647 return s->parameters.compress_wait_thread;
2648}
2649
2650int migrate_decompress_threads(void)
2651{
2652 MigrationState *s;
2653
2654 s = migrate_get_current();
2655
2656 return s->parameters.decompress_threads;
2657}
2658
2659bool migrate_dirty_bitmaps(void)
2660{
2661 MigrationState *s;
2662
2663 s = migrate_get_current();
2664
2665 return s->enabled_capabilities[MIGRATION_CAPABILITY_DIRTY_BITMAPS];
2666}
2667
2668bool migrate_ignore_shared(void)
2669{
2670 MigrationState *s;
2671
2672 s = migrate_get_current();
2673
2674 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_IGNORE_SHARED];
2675}
2676
2677bool migrate_validate_uuid(void)
2678{
2679 MigrationState *s;
2680
2681 s = migrate_get_current();
2682
2683 return s->enabled_capabilities[MIGRATION_CAPABILITY_VALIDATE_UUID];
2684}
2685
2686bool migrate_use_events(void)
2687{
2688 MigrationState *s;
2689
2690 s = migrate_get_current();
2691
2692 return s->enabled_capabilities[MIGRATION_CAPABILITY_EVENTS];
2693}
2694
2695bool migrate_use_multifd(void)
2696{
2697 MigrationState *s;
2698
2699 s = migrate_get_current();
2700
2701 return s->enabled_capabilities[MIGRATION_CAPABILITY_MULTIFD];
2702}
2703
2704bool migrate_pause_before_switchover(void)
2705{
2706 MigrationState *s;
2707
2708 s = migrate_get_current();
2709
2710 return s->enabled_capabilities[
2711 MIGRATION_CAPABILITY_PAUSE_BEFORE_SWITCHOVER];
2712}
2713
2714int migrate_multifd_channels(void)
2715{
2716 MigrationState *s;
2717
2718 s = migrate_get_current();
2719
2720 return s->parameters.multifd_channels;
2721}
2722
2723MultiFDCompression migrate_multifd_compression(void)
2724{
2725 MigrationState *s;
2726
2727 s = migrate_get_current();
2728
2729 assert(s->parameters.multifd_compression < MULTIFD_COMPRESSION__MAX);
2730 return s->parameters.multifd_compression;
2731}
2732
2733int migrate_multifd_zlib_level(void)
2734{
2735 MigrationState *s;
2736
2737 s = migrate_get_current();
2738
2739 return s->parameters.multifd_zlib_level;
2740}
2741
2742int migrate_multifd_zstd_level(void)
2743{
2744 MigrationState *s;
2745
2746 s = migrate_get_current();
2747
2748 return s->parameters.multifd_zstd_level;
2749}
2750
2751#ifdef CONFIG_LINUX
2752bool migrate_use_zero_copy_send(void)
2753{
2754 MigrationState *s;
2755
2756 s = migrate_get_current();
2757
2758 return s->enabled_capabilities[MIGRATION_CAPABILITY_ZERO_COPY_SEND];
2759}
2760#endif
2761
2762int migrate_use_tls(void)
2763{
2764 MigrationState *s;
2765
2766 s = migrate_get_current();
2767
2768 return s->parameters.tls_creds && *s->parameters.tls_creds;
2769}
2770
2771int migrate_use_xbzrle(void)
2772{
2773 MigrationState *s;
2774
2775 s = migrate_get_current();
2776
2777 return s->enabled_capabilities[MIGRATION_CAPABILITY_XBZRLE];
2778}
2779
2780uint64_t migrate_xbzrle_cache_size(void)
2781{
2782 MigrationState *s;
2783
2784 s = migrate_get_current();
2785
2786 return s->parameters.xbzrle_cache_size;
2787}
2788
2789static int64_t migrate_max_postcopy_bandwidth(void)
2790{
2791 MigrationState *s;
2792
2793 s = migrate_get_current();
2794
2795 return s->parameters.max_postcopy_bandwidth;
2796}
2797
2798bool migrate_use_block(void)
2799{
2800 MigrationState *s;
2801
2802 s = migrate_get_current();
2803
2804 return s->enabled_capabilities[MIGRATION_CAPABILITY_BLOCK];
2805}
2806
2807bool migrate_use_return_path(void)
2808{
2809 MigrationState *s;
2810
2811 s = migrate_get_current();
2812
2813 return s->enabled_capabilities[MIGRATION_CAPABILITY_RETURN_PATH];
2814}
2815
2816bool migrate_use_block_incremental(void)
2817{
2818 MigrationState *s;
2819
2820 s = migrate_get_current();
2821
2822 return s->parameters.block_incremental;
2823}
2824
2825bool migrate_background_snapshot(void)
2826{
2827 MigrationState *s;
2828
2829 s = migrate_get_current();
2830
2831 return s->enabled_capabilities[MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT];
2832}
2833
2834bool migrate_postcopy_preempt(void)
2835{
2836 MigrationState *s;
2837
2838 s = migrate_get_current();
2839
2840 return s->enabled_capabilities[MIGRATION_CAPABILITY_POSTCOPY_PREEMPT];
2841}
2842
2843
2844
2845
2846
2847
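/* Migration thread support: return path handling */

/*
 * Something went wrong on the return-path stream; record the error so the
 * migration thread notices and fails the migration.
 */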
2848static void mark_source_rp_bad(MigrationState *s)
2849{
2850 s->rp_state.error = true;
2851}
2852
2853static struct rp_cmd_args {
2854 ssize_t len;
2855 const char *name;
2856} rp_cmd_args[] = {
2857 [MIG_RP_MSG_INVALID] = { .len = -1, .name = "INVALID" },
2858 [MIG_RP_MSG_SHUT] = { .len = 4, .name = "SHUT" },
2859 [MIG_RP_MSG_PONG] = { .len = 4, .name = "PONG" },
2860 [MIG_RP_MSG_REQ_PAGES] = { .len = 12, .name = "REQ_PAGES" },
2861 [MIG_RP_MSG_REQ_PAGES_ID] = { .len = -1, .name = "REQ_PAGES_ID" },
2862 [MIG_RP_MSG_RECV_BITMAP] = { .len = -1, .name = "RECV_BITMAP" },
2863 [MIG_RP_MSG_RESUME_ACK] = { .len = 4, .name = "RESUME_ACK" },
2864 [MIG_RP_MSG_MAX] = { .len = -1, .name = "MAX" },
2865};
2866
2867
2868
2869
2870
2871
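/*
 * Process a page request received on the return path.  Requests must be
 * aligned to the host page size; misaligned or unqueueable requests mark the
 * return path as bad.
 */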
2872static void migrate_handle_rp_req_pages(MigrationState *ms, const char* rbname,
2873 ram_addr_t start, size_t len)
2874{
2875 long our_host_ps = qemu_real_host_page_size();
2876
2877 trace_migrate_handle_rp_req_pages(rbname, start, len);
2878
2879
2880
2881
2882
2883 if (!QEMU_IS_ALIGNED(start, our_host_ps) ||
2884 !QEMU_IS_ALIGNED(len, our_host_ps)) {
2885 error_report("%s: Misaligned page request, start: " RAM_ADDR_FMT
2886 " len: %zd", __func__, start, len);
2887 mark_source_rp_bad(ms);
2888 return;
2889 }
2890
2891 if (ram_save_queue_pages(rbname, start, len)) {
2892 mark_source_rp_bad(ms);
2893 }
2894}
2895
2896
2897static bool postcopy_pause_return_path_thread(MigrationState *s)
2898{
2899 trace_postcopy_pause_return_path();
2900
2901 qemu_sem_wait(&s->postcopy_pause_rp_sem);
2902
2903 trace_postcopy_pause_return_path_continued();
2904
2905 return true;
2906}
2907
2908static int migrate_handle_rp_recv_bitmap(MigrationState *s, char *block_name)
2909{
2910 RAMBlock *block = qemu_ram_block_by_name(block_name);
2911
2912 if (!block) {
2913 error_report("%s: invalid block name '%s'", __func__, block_name);
2914 return -EINVAL;
2915 }
2916
2917
2918 return ram_dirty_bitmap_reload(s, block);
2919}
2920
2921static int migrate_handle_rp_resume_ack(MigrationState *s, uint32_t value)
2922{
2923 trace_source_return_path_thread_resume_ack(value);
2924
2925 if (value != MIGRATION_RESUME_ACK_VALUE) {
2926 error_report("%s: illegal resume_ack value %"PRIu32,
2927 __func__, value);
2928 return -1;
2929 }
2930
2931
2932 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_RECOVER,
2933 MIGRATION_STATUS_POSTCOPY_ACTIVE);
2934
2935
2936 qemu_sem_post(&s->rp_state.rp_sem);
2937
2938 return 0;
2939}
2940
2941
2942
2943
2944
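/*
 * Release the QEMUFiles that point back to the destination: the return-path
 * file and, if present, the postcopy preempt channel.  from_dst_file is
 * cleared under qemu_file_lock before the file is closed.
 */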
2945static void migration_release_dst_files(MigrationState *ms)
2946{
2947 QEMUFile *file;
2948
2949 WITH_QEMU_LOCK_GUARD(&ms->qemu_file_lock) {
2950
2951
2952
2953
2954 file = ms->rp_state.from_dst_file;
2955 ms->rp_state.from_dst_file = NULL;
2956 }
2957
2958
2959
2960
2961
2962
2963 if (ms->postcopy_qemufile_src) {
2964 migration_ioc_unregister_yank_from_file(ms->postcopy_qemufile_src);
2965 qemu_file_shutdown(ms->postcopy_qemufile_src);
2966 qemu_fclose(ms->postcopy_qemufile_src);
2967 ms->postcopy_qemufile_src = NULL;
2968 }
2969
2970 qemu_fclose(file);
2971}
2972
2973
2974
2975
2976
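/*
 * Thread on the source side that handles messages arriving on the return
 * path from the destination (page requests, PONGs, SHUT, resume ACKs, ...).
 */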
2977static void *source_return_path_thread(void *opaque)
2978{
2979 MigrationState *ms = opaque;
2980 QEMUFile *rp = ms->rp_state.from_dst_file;
2981 uint16_t header_len, header_type;
2982 uint8_t buf[512];
2983 uint32_t tmp32, sibling_error;
2984 ram_addr_t start = 0;
2985 size_t len = 0, expected_len;
2986 int res;
2987
2988 trace_source_return_path_thread_entry();
2989 rcu_register_thread();
2990
2991retry:
2992 while (!ms->rp_state.error && !qemu_file_get_error(rp) &&
2993 migration_is_setup_or_active(ms->state)) {
2994 trace_source_return_path_thread_loop_top();
2995 header_type = qemu_get_be16(rp);
2996 header_len = qemu_get_be16(rp);
2997
2998 if (qemu_file_get_error(rp)) {
2999 mark_source_rp_bad(ms);
3000 goto out;
3001 }
3002
3003 if (header_type >= MIG_RP_MSG_MAX ||
3004 header_type == MIG_RP_MSG_INVALID) {
3005 error_report("RP: Received invalid message 0x%04x length 0x%04x",
3006 header_type, header_len);
3007 mark_source_rp_bad(ms);
3008 goto out;
3009 }
3010
3011 if ((rp_cmd_args[header_type].len != -1 &&
3012 header_len != rp_cmd_args[header_type].len) ||
3013 header_len > sizeof(buf)) {
3014             error_report("RP: Received '%s' message (0x%04x) with "
3015                          "incorrect length %d expecting %zu",
3016 rp_cmd_args[header_type].name, header_type, header_len,
3017 (size_t)rp_cmd_args[header_type].len);
3018 mark_source_rp_bad(ms);
3019 goto out;
3020 }
3021
3022
3023 res = qemu_get_buffer(rp, buf, header_len);
3024 if (res != header_len) {
3025 error_report("RP: Failed reading data for message 0x%04x"
3026 " read %d expected %d",
3027 header_type, res, header_len);
3028 mark_source_rp_bad(ms);
3029 goto out;
3030 }
3031
3032
3033 switch (header_type) {
3034 case MIG_RP_MSG_SHUT:
3035 sibling_error = ldl_be_p(buf);
3036 trace_source_return_path_thread_shut(sibling_error);
3037 if (sibling_error) {
3038 error_report("RP: Sibling indicated error %d", sibling_error);
3039 mark_source_rp_bad(ms);
3040 }
3041
3042
3043
3044
3045
3046 goto out;
3047
3048 case MIG_RP_MSG_PONG:
3049 tmp32 = ldl_be_p(buf);
3050 trace_source_return_path_thread_pong(tmp32);
3051 qemu_sem_post(&ms->rp_state.rp_pong_acks);
3052 break;
3053
3054 case MIG_RP_MSG_REQ_PAGES:
3055 start = ldq_be_p(buf);
3056 len = ldl_be_p(buf + 8);
3057 migrate_handle_rp_req_pages(ms, NULL, start, len);
3058 break;
3059
3060 case MIG_RP_MSG_REQ_PAGES_ID:
3061 expected_len = 12 + 1;
3062
3063 if (header_len >= expected_len) {
3064 start = ldq_be_p(buf);
3065 len = ldl_be_p(buf + 8);
3066
3067 tmp32 = buf[12];
3068 buf[13 + tmp32] = '\0';
3069 expected_len += tmp32;
3070 }
3071 if (header_len != expected_len) {
3072 error_report("RP: Req_Page_id with length %d expecting %zd",
3073 header_len, expected_len);
3074 mark_source_rp_bad(ms);
3075 goto out;
3076 }
3077 migrate_handle_rp_req_pages(ms, (char *)&buf[13], start, len);
3078 break;
3079
3080 case MIG_RP_MSG_RECV_BITMAP:
3081 if (header_len < 1) {
3082 error_report("%s: missing block name", __func__);
3083 mark_source_rp_bad(ms);
3084 goto out;
3085 }
3086
3087 buf[buf[0] + 1] = '\0';
3088 if (migrate_handle_rp_recv_bitmap(ms, (char *)(buf + 1))) {
3089 mark_source_rp_bad(ms);
3090 goto out;
3091 }
3092 break;
3093
3094 case MIG_RP_MSG_RESUME_ACK:
3095 tmp32 = ldl_be_p(buf);
3096 if (migrate_handle_rp_resume_ack(ms, tmp32)) {
3097 mark_source_rp_bad(ms);
3098 goto out;
3099 }
3100 break;
3101
3102 default:
3103 break;
3104 }
3105 }
3106
3107out:
3108 res = qemu_file_get_error(rp);
3109 if (res) {
3110 if (res && migration_in_postcopy()) {
3111
3112
3113
3114
3115 migration_release_dst_files(ms);
3116 rp = NULL;
3117 if (postcopy_pause_return_path_thread(ms)) {
3118
3119
3120
3121
3122 rp = ms->rp_state.from_dst_file;
3123 ms->rp_state.error = false;
3124 goto retry;
3125 }
3126 }
3127
3128 trace_source_return_path_thread_bad_end();
3129 mark_source_rp_bad(ms);
3130 }
3131
3132 trace_source_return_path_thread_end();
3133 migration_release_dst_files(ms);
3134 rcu_unregister_thread();
3135 return NULL;
3136}
3137
3138static int open_return_path_on_source(MigrationState *ms,
3139 bool create_thread)
3140{
3141 ms->rp_state.from_dst_file = qemu_file_get_return_path(ms->to_dst_file);
3142 if (!ms->rp_state.from_dst_file) {
3143 return -1;
3144 }
3145
3146 trace_open_return_path_on_source();
3147
3148 if (!create_thread) {
3149
3150 return 0;
3151 }
3152
3153 qemu_thread_create(&ms->rp_state.rp_thread, "return path",
3154 source_return_path_thread, ms, QEMU_THREAD_JOINABLE);
3155 ms->rp_state.rp_thread_created = true;
3156
3157 trace_open_return_path_on_source_continue();
3158
3159 return 0;
3160}
3161
3162
3163static int await_return_path_close_on_source(MigrationState *ms)
3164{
3165
3166
3167
3168
3169
3170 if (qemu_file_get_error(ms->to_dst_file) && ms->rp_state.from_dst_file) {
3171
3172
3173
3174
3175 qemu_file_shutdown(ms->rp_state.from_dst_file);
3176 mark_source_rp_bad(ms);
3177 }
3178 trace_await_return_path_close_on_source_joining();
3179 qemu_thread_join(&ms->rp_state.rp_thread);
3180 ms->rp_state.rp_thread_created = false;
3181 trace_await_return_path_close_on_source_close();
3182 return ms->rp_state.error;
3183}
3184
3185static inline void
3186migration_wait_main_channel(MigrationState *ms)
3187{
3188
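    /* Wait until at least one PONG has been received on the main channel */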
3189 qemu_sem_wait(&ms->rp_state.rp_pong_acks);
3190}
3191
3192
3193
3194
3195
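/*
 * Switch from normal (precopy) iteration to postcopy.
 * Returns non-zero on error.
 */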
3196static int postcopy_start(MigrationState *ms)
3197{
3198 int ret;
3199 QIOChannelBuffer *bioc;
3200 QEMUFile *fb;
3201 int64_t time_at_stop = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3202 int64_t bandwidth = migrate_max_postcopy_bandwidth();
3203 bool restart_block = false;
3204 int cur_state = MIGRATION_STATUS_ACTIVE;
3205
3206 if (migrate_postcopy_preempt()) {
3207 migration_wait_main_channel(ms);
3208 if (postcopy_preempt_establish_channel(ms)) {
3209 migrate_set_state(&ms->state, ms->state, MIGRATION_STATUS_FAILED);
3210 return -1;
3211 }
3212 }
3213
3214 if (!migrate_pause_before_switchover()) {
3215 migrate_set_state(&ms->state, MIGRATION_STATUS_ACTIVE,
3216 MIGRATION_STATUS_POSTCOPY_ACTIVE);
3217 }
3218
3219 trace_postcopy_start();
3220 qemu_mutex_lock_iothread();
3221 trace_postcopy_start_set_run();
3222
3223 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3224 global_state_store();
3225 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
3226 if (ret < 0) {
3227 goto fail;
3228 }
3229
3230 ret = migration_maybe_pause(ms, &cur_state,
3231 MIGRATION_STATUS_POSTCOPY_ACTIVE);
3232 if (ret < 0) {
3233 goto fail;
3234 }
3235
3236 ret = bdrv_inactivate_all();
3237 if (ret < 0) {
3238 goto fail;
3239 }
3240 restart_block = true;
3241
3242
3243
3244
3245
3246 qemu_savevm_state_complete_precopy(ms->to_dst_file, true, false);
3247
3248
3249
3250
3251
3252
3253
3254 if (migrate_postcopy_ram()) {
3255 ram_postcopy_send_discard_bitmap(ms);
3256 }
3257
3258
3259
3260
3261
3262
3263
3264 if (!bandwidth) {
3265 qemu_file_set_rate_limit(ms->to_dst_file, INT64_MAX);
3266 } else {
3267 qemu_file_set_rate_limit(ms->to_dst_file, bandwidth / XFER_LIMIT_RATIO);
3268 }
3269 if (migrate_postcopy_ram()) {
3270
3271 qemu_savevm_send_ping(ms->to_dst_file, 2);
3272 }
3273
3274
3275
3276
3277
3278
3279
3280
3281
3282
3283
3284
3285 bioc = qio_channel_buffer_new(4096);
3286 qio_channel_set_name(QIO_CHANNEL(bioc), "migration-postcopy-buffer");
3287 fb = qemu_file_new_output(QIO_CHANNEL(bioc));
3288 object_unref(OBJECT(bioc));
3289
3290
3291
3292
3293
3294 qemu_savevm_send_postcopy_listen(fb);
3295
3296 qemu_savevm_state_complete_precopy(fb, false, false);
3297 if (migrate_postcopy_ram()) {
3298 qemu_savevm_send_ping(fb, 3);
3299 }
3300
3301 qemu_savevm_send_postcopy_run(fb);
3302
3303
3304
3305
3306
3307
3308
3309 ret = qemu_file_get_error(ms->to_dst_file);
3310 if (ret) {
3311 error_report("postcopy_start: Migration stream errored (pre package)");
3312 goto fail_closefb;
3313 }
3314
3315 restart_block = false;
3316
3317
3318 if (qemu_savevm_send_packaged(ms->to_dst_file, bioc->data, bioc->usage)) {
3319 goto fail_closefb;
3320 }
3321 qemu_fclose(fb);
3322
3323
3324
3325
3326
3327 ms->postcopy_after_devices = true;
3328 notifier_list_notify(&migration_state_notifiers, ms);
3329
3330 ms->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - time_at_stop;
3331
3332 qemu_mutex_unlock_iothread();
3333
3334 if (migrate_postcopy_ram()) {
3335
3336
3337
3338
3339 qemu_savevm_send_ping(ms->to_dst_file, 4);
3340 }
3341
3342 if (migrate_release_ram()) {
3343 ram_postcopy_migrated_memory_release(ms);
3344 }
3345
3346 ret = qemu_file_get_error(ms->to_dst_file);
3347 if (ret) {
3348 error_report("postcopy_start: Migration stream errored");
3349 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
3350 MIGRATION_STATUS_FAILED);
3351 }
3352
3353 trace_postcopy_preempt_enabled(migrate_postcopy_preempt());
3354
3355 return ret;
3356
3357fail_closefb:
3358 qemu_fclose(fb);
3359fail:
3360 migrate_set_state(&ms->state, MIGRATION_STATUS_POSTCOPY_ACTIVE,
3361 MIGRATION_STATUS_FAILED);
3362 if (restart_block) {
3363
3364
3365
3366 Error *local_err = NULL;
3367
3368 bdrv_activate_all(&local_err);
3369 if (local_err) {
3370 error_report_err(local_err);
3371 }
3372 }
3373 qemu_mutex_unlock_iothread();
3374 return -1;
3375}
3376
3377
3378
3379
3380
3381
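/**
 * migration_maybe_pause: pause in the PRE_SWITCHOVER state when the
 * pause-before-switchover capability is set; called with the iothread lock
 * held.  Returns 0 when the expected new state was reached.
 */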
3382static int migration_maybe_pause(MigrationState *s,
3383 int *current_active_state,
3384 int new_state)
3385{
3386 if (!migrate_pause_before_switchover()) {
3387 return 0;
3388 }
3389
3390
3391
3392
3393
3394
3395
3396 while (qemu_sem_timedwait(&s->pause_sem, 1) == 0) {
3397
3398 }
3399
3400
3401
3402
3403
3404
3405
3406 if (s->state != MIGRATION_STATUS_CANCELLING) {
3407 qemu_mutex_unlock_iothread();
3408 migrate_set_state(&s->state, *current_active_state,
3409 MIGRATION_STATUS_PRE_SWITCHOVER);
3410 qemu_sem_wait(&s->pause_sem);
3411 migrate_set_state(&s->state, MIGRATION_STATUS_PRE_SWITCHOVER,
3412 new_state);
3413 *current_active_state = new_state;
3414 qemu_mutex_lock_iothread();
3415 }
3416
3417 return s->state == new_state ? 0 : -EINVAL;
3418}
3419
3420
3421
3422
3423
3424
3425
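/**
 * migration_completion: used by migration_thread when there is not much
 * left to send.  The caller 'breaks' the main loop when this returns.
 *
 * @s: current migration state
 */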
3426static void migration_completion(MigrationState *s)
3427{
3428 int ret;
3429 int current_active_state = s->state;
3430
3431 if (s->state == MIGRATION_STATUS_ACTIVE) {
3432 qemu_mutex_lock_iothread();
3433 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3434 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
3435 s->vm_was_running = runstate_is_running();
3436 ret = global_state_store();
3437
3438 if (!ret) {
3439 ret = vm_stop_force_state(RUN_STATE_FINISH_MIGRATE);
3440 trace_migration_completion_vm_stop(ret);
3441 if (ret >= 0) {
3442                 ret = migration_maybe_pause(s, &current_active_state,
3443 MIGRATION_STATUS_DEVICE);
3444 }
3445 if (ret >= 0) {
3446
3447
3448
3449
3450
3451 s->block_inactive = !migrate_colo_enabled();
3452 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
3453 ret = qemu_savevm_state_complete_precopy(s->to_dst_file, false,
3454 s->block_inactive);
3455 }
3456 }
3457 qemu_mutex_unlock_iothread();
3458
3459 if (ret < 0) {
3460 goto fail;
3461 }
3462 } else if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3463 trace_migration_completion_postcopy_end();
3464
3465 qemu_mutex_lock_iothread();
3466 qemu_savevm_state_complete_postcopy(s->to_dst_file);
3467 qemu_mutex_unlock_iothread();
3468
3469
3470
3471
3472
3473
3474 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
3475 postcopy_preempt_shutdown_file(s);
3476 }
3477
3478 trace_migration_completion_postcopy_end_after_complete();
3479 } else {
3480 goto fail;
3481 }
3482
3483
3484
3485
3486
3487
3488
3489 if (s->rp_state.rp_thread_created) {
3490 int rp_error;
3491 trace_migration_return_path_end_before();
3492 rp_error = await_return_path_close_on_source(s);
3493 trace_migration_return_path_end_after(rp_error);
3494 if (rp_error) {
3495 goto fail;
3496 }
3497 }
3498
3499 if (qemu_file_get_error(s->to_dst_file)) {
3500 trace_migration_completion_file_err();
3501 goto fail;
3502 }
3503
3504 if (migrate_colo_enabled() && s->state == MIGRATION_STATUS_ACTIVE) {
3505
3506 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
3507 MIGRATION_STATUS_COLO);
3508 } else {
3509 migrate_set_state(&s->state, current_active_state,
3510 MIGRATION_STATUS_COMPLETED);
3511 }
3512
3513 return;
3514
3515fail:
3516 if (s->block_inactive && (s->state == MIGRATION_STATUS_ACTIVE ||
3517 s->state == MIGRATION_STATUS_DEVICE)) {
3518
3519
3520
3521
3522 Error *local_err = NULL;
3523
3524 qemu_mutex_lock_iothread();
3525 bdrv_activate_all(&local_err);
3526 if (local_err) {
3527 error_report_err(local_err);
3528 } else {
3529 s->block_inactive = false;
3530 }
3531 qemu_mutex_unlock_iothread();
3532 }
3533
3534 migrate_set_state(&s->state, current_active_state,
3535 MIGRATION_STATUS_FAILED);
3536}
3537
3538
3539
3540
3541
3542
3543
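/**
 * bg_migration_completion: used by bg_migration_thread once all of the RAM
 * has been saved.  The caller 'breaks' the loop when this returns.
 *
 * @s: current migration state
 */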
3544static void bg_migration_completion(MigrationState *s)
3545{
3546 int current_active_state = s->state;
3547
3548
3549
3550
3551
3552
3553 ram_write_tracking_stop();
3554
3555 if (s->state == MIGRATION_STATUS_ACTIVE) {
3556
3557
3558
3559
3560
3561
3562 qemu_put_buffer(s->to_dst_file, s->bioc->data, s->bioc->usage);
3563 qemu_fflush(s->to_dst_file);
3564 } else if (s->state == MIGRATION_STATUS_CANCELLING) {
3565 goto fail;
3566 }
3567
3568 if (qemu_file_get_error(s->to_dst_file)) {
3569 trace_migration_completion_file_err();
3570 goto fail;
3571 }
3572
3573 migrate_set_state(&s->state, current_active_state,
3574 MIGRATION_STATUS_COMPLETED);
3575 return;
3576
3577fail:
3578 migrate_set_state(&s->state, current_active_state,
3579 MIGRATION_STATUS_FAILED);
3580}
3581
3582bool migrate_colo_enabled(void)
3583{
3584 MigrationState *s = migrate_get_current();
3585 return s->enabled_capabilities[MIGRATION_CAPABILITY_X_COLO];
3586}
3587
3588typedef enum MigThrError {
3589
3590 MIG_THR_ERR_NONE = 0,
3591
3592 MIG_THR_ERR_RECOVERED = 1,
3593
3594 MIG_THR_ERR_FATAL = 2,
3595} MigThrError;
3596
3597static int postcopy_resume_handshake(MigrationState *s)
3598{
3599 qemu_savevm_send_postcopy_resume(s->to_dst_file);
3600
3601 while (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3602 qemu_sem_wait(&s->rp_state.rp_sem);
3603 }
3604
3605 if (s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE) {
3606 return 0;
3607 }
3608
3609 return -1;
3610}
3611
3612
3613static int postcopy_do_resume(MigrationState *s)
3614{
3615 int ret;
3616
3617
3618
3619
3620
3621 ret = qemu_savevm_state_resume_prepare(s);
3622 if (ret) {
3623 error_report("%s: resume_prepare() failure detected: %d",
3624 __func__, ret);
3625 return ret;
3626 }
3627
3628
3629
3630
3631
3632
3633
3634
3635 ret = postcopy_preempt_establish_channel(s);
3636 if (ret) {
3637 error_report("%s: postcopy_preempt_establish_channel(): %d",
3638 __func__, ret);
3639 return ret;
3640 }
3641
3642
3643
3644
3645
3646 ret = postcopy_resume_handshake(s);
3647 if (ret) {
3648 error_report("%s: handshake failed: %d", __func__, ret);
3649 return ret;
3650 }
3651
3652 return 0;
3653}
3654
3655
3656
3657
3658
3659
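/*
 * Block until it is safe to continue the current postcopy migration.
 * Returns MIG_THR_ERR_RECOVERED if the migration was recovered, or
 * MIG_THR_ERR_FATAL on an unrecoverable failure.
 */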
3660static MigThrError postcopy_pause(MigrationState *s)
3661{
3662 assert(s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE);
3663
3664 while (true) {
3665 QEMUFile *file;
3666
3667
3668
3669
3670
3671
3672
3673
3674
3675
3676 assert(s->to_dst_file);
3677 migration_ioc_unregister_yank_from_file(s->to_dst_file);
3678 qemu_mutex_lock(&s->qemu_file_lock);
3679 file = s->to_dst_file;
3680 s->to_dst_file = NULL;
3681 qemu_mutex_unlock(&s->qemu_file_lock);
3682
3683 qemu_file_shutdown(file);
3684 qemu_fclose(file);
3685
3686 migrate_set_state(&s->state, s->state,
3687 MIGRATION_STATUS_POSTCOPY_PAUSED);
3688
3689 error_report("Detected IO failure for postcopy. "
3690 "Migration paused.");
3691
3692
3693
3694
3695
3696 while (s->state == MIGRATION_STATUS_POSTCOPY_PAUSED) {
3697 qemu_sem_wait(&s->postcopy_pause_sem);
3698 }
3699
3700 if (s->state == MIGRATION_STATUS_POSTCOPY_RECOVER) {
3701
3702
3703
3704
3705
3706
3707 qemu_sem_post(&s->postcopy_pause_rp_sem);
3708
3709
3710 if (postcopy_do_resume(s) == 0) {
3711
3712 trace_postcopy_pause_continued();
3713 return MIG_THR_ERR_RECOVERED;
3714 } else {
3715
3716
3717
3718
3719
3720 continue;
3721 }
3722 } else {
3723
3724 return MIG_THR_ERR_FATAL;
3725 }
3726 }
3727}
3728
3729static MigThrError migration_detect_error(MigrationState *s)
3730{
3731 int ret;
3732 int state = s->state;
3733 Error *local_error = NULL;
3734
3735 if (state == MIGRATION_STATUS_CANCELLING ||
3736 state == MIGRATION_STATUS_CANCELLED) {
3737
3738 return MIG_THR_ERR_FATAL;
3739 }
3740
3741
3742
3743
3744
3745 ret = qemu_file_get_error_obj_any(s->to_dst_file,
3746 s->postcopy_qemufile_src,
3747 &local_error);
3748 if (!ret) {
3749
3750 assert(!local_error);
3751 return MIG_THR_ERR_NONE;
3752 }
3753
3754 if (local_error) {
3755 migrate_set_error(s, local_error);
3756 error_free(local_error);
3757 }
3758
3759 if (state == MIGRATION_STATUS_POSTCOPY_ACTIVE && ret) {
3760
3761
3762
3763
3764
3765 return postcopy_pause(s);
3766 } else {
3767
3768
3769
3770
3771 migrate_set_state(&s->state, state, MIGRATION_STATUS_FAILED);
3772 trace_migration_thread_file_err();
3773
3774
3775 return MIG_THR_ERR_FATAL;
3776 }
3777}
3778
3779
3780static uint64_t migration_total_bytes(MigrationState *s)
3781{
3782 return qemu_file_total_transferred(s->to_dst_file) +
3783 ram_counters.multifd_bytes;
3784}
3785
3786static void migration_calculate_complete(MigrationState *s)
3787{
3788 uint64_t bytes = migration_total_bytes(s);
3789 int64_t end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3790 int64_t transfer_time;
3791
3792 s->total_time = end_time - s->start_time;
3793 if (!s->downtime) {
3794
3795
3796
3797
3798 s->downtime = end_time - s->downtime_start;
3799 }
3800
3801 transfer_time = s->total_time - s->setup_time;
3802 if (transfer_time) {
3803 s->mbps = ((double) bytes * 8.0) / transfer_time / 1000;
3804 }
3805}
3806
3807static void update_iteration_initial_status(MigrationState *s)
3808{
3809
3810
3811
3812
3813 s->iteration_start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
3814 s->iteration_initial_bytes = migration_total_bytes(s);
3815 s->iteration_initial_pages = ram_get_total_transferred_pages();
3816}
3817
3818static void migration_update_counters(MigrationState *s,
3819 int64_t current_time)
3820{
3821 uint64_t transferred, transferred_pages, time_spent;
3822 uint64_t current_bytes;
3823 double bandwidth;
3824
3825 if (current_time < s->iteration_start_time + BUFFER_DELAY) {
3826 return;
3827 }
3828
3829 current_bytes = migration_total_bytes(s);
3830 transferred = current_bytes - s->iteration_initial_bytes;
3831 time_spent = current_time - s->iteration_start_time;
3832 bandwidth = (double)transferred / time_spent;
3833 s->threshold_size = bandwidth * s->parameters.downtime_limit;
3834
3835 s->mbps = (((double) transferred * 8.0) /
3836 ((double) time_spent / 1000.0)) / 1000.0 / 1000.0;
3837
3838 transferred_pages = ram_get_total_transferred_pages() -
3839 s->iteration_initial_pages;
3840 s->pages_per_second = (double) transferred_pages /
3841 (((double) time_spent / 1000.0));
3842
3843
3844
3845
3846
3847 if (ram_counters.dirty_pages_rate && transferred > 10000) {
3848 s->expected_downtime = ram_counters.remaining / bandwidth;
3849 }
3850
3851 qemu_file_reset_rate_limit(s->to_dst_file);
3852
3853 update_iteration_initial_status(s);
3854
3855 trace_migrate_transferred(transferred, time_spent,
3856 bandwidth, s->threshold_size);
3857}
3858
3859
3860typedef enum {
3861 MIG_ITERATE_RESUME,
3862 MIG_ITERATE_SKIP,
3863 MIG_ITERATE_BREAK,
3864} MigIterateState;
3865
3866
3867
3868
3869
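/*
 * Run one iteration of precopy/postcopy migration.  The return value tells
 * the caller whether to keep iterating, skip to the next loop round, or
 * break out because completion (or the postcopy switchover) was triggered.
 */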
3870static MigIterateState migration_iteration_run(MigrationState *s)
3871{
3872 uint64_t must_precopy, can_postcopy;
3873 bool in_postcopy = s->state == MIGRATION_STATUS_POSTCOPY_ACTIVE;
3874
3875 qemu_savevm_state_pending_estimate(&must_precopy, &can_postcopy);
3876 uint64_t pending_size = must_precopy + can_postcopy;
3877
3878 trace_migrate_pending_estimate(pending_size, must_precopy, can_postcopy);
3879
3880 if (must_precopy <= s->threshold_size) {
3881 qemu_savevm_state_pending_exact(&must_precopy, &can_postcopy);
3882 pending_size = must_precopy + can_postcopy;
3883 trace_migrate_pending_exact(pending_size, must_precopy, can_postcopy);
3884 }
3885
3886 if (!pending_size || pending_size < s->threshold_size) {
3887 trace_migration_thread_low_pending(pending_size);
3888 migration_completion(s);
3889 return MIG_ITERATE_BREAK;
3890 }
3891
3892
3893 if (!in_postcopy && must_precopy <= s->threshold_size &&
3894 qatomic_read(&s->start_postcopy)) {
3895 if (postcopy_start(s)) {
3896 error_report("%s: postcopy failed to start", __func__);
3897 }
3898 return MIG_ITERATE_SKIP;
3899 }
3900
3901
3902 qemu_savevm_state_iterate(s->to_dst_file, in_postcopy);
3903 return MIG_ITERATE_RESUME;
3904}
3905
3906static void migration_iteration_finish(MigrationState *s)
3907{
3908
3909 cpu_throttle_stop();
3910
3911 qemu_mutex_lock_iothread();
3912 switch (s->state) {
3913 case MIGRATION_STATUS_COMPLETED:
3914 migration_calculate_complete(s);
3915 runstate_set(RUN_STATE_POSTMIGRATE);
3916 break;
3917 case MIGRATION_STATUS_COLO:
3918 if (!migrate_colo_enabled()) {
3919 error_report("%s: critical error: calling COLO code without "
3920 "COLO enabled", __func__);
3921 }
3922 migrate_start_colo_process(s);
3923 s->vm_was_running = true;
3924
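        /* Fallthrough */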
3925 case MIGRATION_STATUS_FAILED:
3926 case MIGRATION_STATUS_CANCELLED:
3927 case MIGRATION_STATUS_CANCELLING:
3928 if (s->vm_was_running) {
3929 if (!runstate_check(RUN_STATE_SHUTDOWN)) {
3930 vm_start();
3931 }
3932 } else {
3933 if (runstate_check(RUN_STATE_FINISH_MIGRATE)) {
3934 runstate_set(RUN_STATE_POSTMIGRATE);
3935 }
3936 }
3937 break;
3938
3939 default:
3940
3941 error_report("%s: Unknown ending state %d", __func__, s->state);
3942 break;
3943 }
3944 migrate_fd_cleanup_schedule(s);
3945 qemu_mutex_unlock_iothread();
3946}
3947
3948static void bg_migration_iteration_finish(MigrationState *s)
3949{
3950 qemu_mutex_lock_iothread();
3951 switch (s->state) {
3952 case MIGRATION_STATUS_COMPLETED:
3953 migration_calculate_complete(s);
3954 break;
3955
3956 case MIGRATION_STATUS_ACTIVE:
3957 case MIGRATION_STATUS_FAILED:
3958 case MIGRATION_STATUS_CANCELLED:
3959 case MIGRATION_STATUS_CANCELLING:
3960 break;
3961
3962 default:
3963
3964 error_report("%s: Unknown ending state %d", __func__, s->state);
3965 break;
3966 }
3967
3968 migrate_fd_cleanup_schedule(s);
3969 qemu_mutex_unlock_iothread();
3970}
3971
3972
3973
3974
3975
3976static MigIterateState bg_migration_iteration_run(MigrationState *s)
3977{
3978 int res;
3979
3980 res = qemu_savevm_state_iterate(s->to_dst_file, false);
3981 if (res > 0) {
3982 bg_migration_completion(s);
3983 return MIG_ITERATE_BREAK;
3984 }
3985
3986 return MIG_ITERATE_RESUME;
3987}
3988
3989void migration_make_urgent_request(void)
3990{
3991 qemu_sem_post(&migrate_get_current()->rate_limit_sem);
3992}
3993
3994void migration_consume_urgent_request(void)
3995{
3996 qemu_sem_wait(&migrate_get_current()->rate_limit_sem);
3997}
3998
3999
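/* Returns true if the rate limit was broken by an urgent request */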
4000bool migration_rate_limit(void)
4001{
4002 int64_t now = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
4003 MigrationState *s = migrate_get_current();
4004
4005 bool urgent = false;
4006 migration_update_counters(s, now);
4007 if (qemu_file_rate_limit(s->to_dst_file)) {
4008
4009 if (qemu_file_get_error(s->to_dst_file)) {
4010 return false;
4011 }
4012
4013
4014
4015
4016 int ms = s->iteration_start_time + BUFFER_DELAY - now;
4017 trace_migration_rate_limit_pre(ms);
4018 if (qemu_sem_timedwait(&s->rate_limit_sem, ms) == 0) {
4019
4020
4021
4022
4023
4024
4025
4026 qemu_sem_post(&s->rate_limit_sem);
4027 urgent = true;
4028 }
4029 trace_migration_rate_limit_post(urgent);
4030 }
4031 return urgent;
4032}
4033
4034
4035
4036
4037
4038
4039static void qemu_savevm_wait_unplug(MigrationState *s, int old_state,
4040 int new_state)
4041{
4042 if (qemu_savevm_state_guest_unplug_pending()) {
4043 migrate_set_state(&s->state, old_state, MIGRATION_STATUS_WAIT_UNPLUG);
4044
4045 while (s->state == MIGRATION_STATUS_WAIT_UNPLUG &&
4046 qemu_savevm_state_guest_unplug_pending()) {
4047 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
4048 }
4049 if (s->state != MIGRATION_STATUS_WAIT_UNPLUG) {
4050 int timeout = 120;
4051
4052
4053
4054
4055
4056 while (timeout-- && qemu_savevm_state_guest_unplug_pending()) {
4057 qemu_sem_timedwait(&s->wait_unplug_sem, 250);
4058 }
4059 if (qemu_savevm_state_guest_unplug_pending() &&
4060 !qtest_enabled()) {
4061 warn_report("migration: partially unplugged device on "
4062 "failure");
4063 }
4064 }
4065
4066 migrate_set_state(&s->state, MIGRATION_STATUS_WAIT_UNPLUG, new_state);
4067 } else {
4068 migrate_set_state(&s->state, old_state, new_state);
4069 }
4070}
4071
4072
4073
4074
4075
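/*
 * Master migration thread on the source VM.  It drives the migration and
 * pumps the data down the outgoing channel.
 */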
4076static void *migration_thread(void *opaque)
4077{
4078 MigrationState *s = opaque;
4079 MigrationThread *thread = NULL;
4080 int64_t setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
4081 MigThrError thr_error;
4082 bool urgent = false;
4083
4084 thread = MigrationThreadAdd("live_migration", qemu_get_thread_id());
4085
4086 rcu_register_thread();
4087
4088 object_ref(OBJECT(s));
4089 update_iteration_initial_status(s);
4090
4091 qemu_savevm_state_header(s->to_dst_file);
4092
4093
4094
4095
4096
4097 if (s->rp_state.rp_thread_created) {
4098
4099 qemu_savevm_send_open_return_path(s->to_dst_file);
4100
4101
4102 qemu_savevm_send_ping(s->to_dst_file, 1);
4103 }
4104
4105 if (migrate_postcopy()) {
4106
4107
4108
4109
4110
4111 qemu_savevm_send_postcopy_advise(s->to_dst_file);
4112 }
4113
4114 if (migrate_colo_enabled()) {
4115
4116 qemu_savevm_send_colo_enable(s->to_dst_file);
4117 }
4118
4119 qemu_savevm_state_setup(s->to_dst_file);
4120
4121 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
4122 MIGRATION_STATUS_ACTIVE);
4123
4124 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
4125
4126 trace_migration_thread_setup_complete();
4127
4128 while (migration_is_active(s)) {
4129 if (urgent || !qemu_file_rate_limit(s->to_dst_file)) {
4130 MigIterateState iter_state = migration_iteration_run(s);
4131 if (iter_state == MIG_ITERATE_SKIP) {
4132 continue;
4133 } else if (iter_state == MIG_ITERATE_BREAK) {
4134 break;
4135 }
4136 }
4137
4138
4139
4140
4141
4142 thr_error = migration_detect_error(s);
4143 if (thr_error == MIG_THR_ERR_FATAL) {
4144
4145 break;
4146 } else if (thr_error == MIG_THR_ERR_RECOVERED) {
4147
4148
4149
4150
4151
4152 update_iteration_initial_status(s);
4153 }
4154
4155 urgent = migration_rate_limit();
4156 }
4157
4158 trace_migration_thread_after_loop();
4159 migration_iteration_finish(s);
4160 object_unref(OBJECT(s));
4161 rcu_unregister_thread();
4162 MigrationThreadDel(thread);
4163 return NULL;
4164}
4165
4166static void bg_migration_vm_start_bh(void *opaque)
4167{
4168 MigrationState *s = opaque;
4169
4170 qemu_bh_delete(s->vm_start_bh);
4171 s->vm_start_bh = NULL;
4172
4173 vm_start();
4174 s->downtime = qemu_clock_get_ms(QEMU_CLOCK_REALTIME) - s->downtime_start;
4175}
4176
4177
4178
4179
4180
4181
4182
4183
4184
4185
4186
4187
4188
4189
4190
4191
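/*
 * Background snapshot thread, based on the live-migration code.  RAM is
 * write-protected (userfaultfd write protection on Linux) while the vmstate
 * is streamed out, so the resulting snapshot reflects the machine state at
 * the moment the snapshot was started rather than when it finished.
 */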
4192static void *bg_migration_thread(void *opaque)
4193{
4194 MigrationState *s = opaque;
4195 int64_t setup_start;
4196 MigThrError thr_error;
4197 QEMUFile *fb;
4198 bool early_fail = true;
4199
4200 rcu_register_thread();
4201 object_ref(OBJECT(s));
4202
4203 qemu_file_set_rate_limit(s->to_dst_file, INT64_MAX);
4204
4205 setup_start = qemu_clock_get_ms(QEMU_CLOCK_HOST);
4206
4207
4208
4209
4210
4211
4212
4213
4214
4215 s->bioc = qio_channel_buffer_new(512 * 1024);
4216 qio_channel_set_name(QIO_CHANNEL(s->bioc), "vmstate-buffer");
4217 fb = qemu_file_new_output(QIO_CHANNEL(s->bioc));
4218 object_unref(OBJECT(s->bioc));
4219
4220 update_iteration_initial_status(s);
4221
4222
4223
4224
4225
4226#ifdef __linux__
4227 ram_write_tracking_prepare();
4228#endif
4229
4230 qemu_savevm_state_header(s->to_dst_file);
4231 qemu_savevm_state_setup(s->to_dst_file);
4232
4233 qemu_savevm_wait_unplug(s, MIGRATION_STATUS_SETUP,
4234 MIGRATION_STATUS_ACTIVE);
4235
4236 s->setup_time = qemu_clock_get_ms(QEMU_CLOCK_HOST) - setup_start;
4237
4238 trace_migration_thread_setup_complete();
4239 s->downtime_start = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
4240
4241 qemu_mutex_lock_iothread();
4242
4243
4244
4245
4246
4247 qemu_system_wakeup_request(QEMU_WAKEUP_REASON_OTHER, NULL);
4248 s->vm_was_running = runstate_is_running();
4249
4250 if (global_state_store()) {
4251 goto fail;
4252 }
4253
4254 if (vm_stop_force_state(RUN_STATE_PAUSED)) {
4255 goto fail;
4256 }
4257
4258
4259
4260
4261 cpu_synchronize_all_states();
4262 if (qemu_savevm_state_complete_precopy_non_iterable(fb, false, false)) {
4263 goto fail;
4264 }
4265
4266
4267
4268
4269 qemu_fflush(fb);
4270
4271
4272 if (ram_write_tracking_start()) {
4273 goto fail;
4274 }
4275 early_fail = false;
4276
4277
4278
4279
4280
4281
4282
4283 s->vm_start_bh = qemu_bh_new(bg_migration_vm_start_bh, s);
4284 qemu_bh_schedule(s->vm_start_bh);
4285
4286 qemu_mutex_unlock_iothread();
4287
4288 while (migration_is_active(s)) {
4289 MigIterateState iter_state = bg_migration_iteration_run(s);
4290 if (iter_state == MIG_ITERATE_SKIP) {
4291 continue;
4292 } else if (iter_state == MIG_ITERATE_BREAK) {
4293 break;
4294 }
4295
4296
4297
4298
4299
4300 thr_error = migration_detect_error(s);
4301 if (thr_error == MIG_THR_ERR_FATAL) {
4302
4303 break;
4304 }
4305
4306 migration_update_counters(s, qemu_clock_get_ms(QEMU_CLOCK_REALTIME));
4307 }
4308
4309 trace_migration_thread_after_loop();
4310
4311fail:
4312 if (early_fail) {
4313 migrate_set_state(&s->state, MIGRATION_STATUS_ACTIVE,
4314 MIGRATION_STATUS_FAILED);
4315 qemu_mutex_unlock_iothread();
4316 }
4317
4318 bg_migration_iteration_finish(s);
4319
4320 qemu_fclose(fb);
4321 object_unref(OBJECT(s));
4322 rcu_unregister_thread();
4323
4324 return NULL;
4325}
4326
4327void migrate_fd_connect(MigrationState *s, Error *error_in)
4328{
4329 Error *local_err = NULL;
4330 int64_t rate_limit;
4331 bool resume = s->state == MIGRATION_STATUS_POSTCOPY_PAUSED;
4332
4333
4334
4335
4336
4337
4338 migrate_error_free(s);
4339
4340 s->expected_downtime = s->parameters.downtime_limit;
4341 if (resume) {
4342 assert(s->cleanup_bh);
4343 } else {
4344 assert(!s->cleanup_bh);
4345 s->cleanup_bh = qemu_bh_new(migrate_fd_cleanup_bh, s);
4346 }
4347 if (error_in) {
4348 migrate_fd_error(s, error_in);
4349 if (resume) {
4350
4351
4352
4353
4354
4355
4356
4357 error_report_err(error_copy(s->error));
4358 } else {
4359 migrate_fd_cleanup(s);
4360 }
4361 return;
4362 }
4363
4364 if (resume) {
4365
4366 rate_limit = s->parameters.max_postcopy_bandwidth /
4367 XFER_LIMIT_RATIO;
4368 } else {
4369
4370 rate_limit = s->parameters.max_bandwidth / XFER_LIMIT_RATIO;
4371
4372
4373 notifier_list_notify(&migration_state_notifiers, s);
4374 }
4375
4376 qemu_file_set_rate_limit(s->to_dst_file, rate_limit);
4377 qemu_file_set_blocking(s->to_dst_file, true);
4378
4379
4380
4381
4382
4383
4384 if (migrate_postcopy_ram() || migrate_use_return_path()) {
4385 if (open_return_path_on_source(s, !resume)) {
4386 error_report("Unable to open return-path for postcopy");
4387 migrate_set_state(&s->state, s->state, MIGRATION_STATUS_FAILED);
4388 migrate_fd_cleanup(s);
4389 return;
4390 }
4391 }
4392
4393
4394
4395
4396
4397
4398 if (migrate_postcopy_preempt() && s->preempt_pre_7_2) {
4399 postcopy_preempt_setup(s);
4400 }
4401
4402 if (resume) {
4403
4404 migrate_set_state(&s->state, MIGRATION_STATUS_POSTCOPY_PAUSED,
4405 MIGRATION_STATUS_POSTCOPY_RECOVER);
4406 qemu_sem_post(&s->postcopy_pause_sem);
4407 return;
4408 }
4409
4410 if (multifd_save_setup(&local_err) != 0) {
4411 error_report_err(local_err);
4412 migrate_set_state(&s->state, MIGRATION_STATUS_SETUP,
4413 MIGRATION_STATUS_FAILED);
4414 migrate_fd_cleanup(s);
4415 return;
4416 }
4417
4418 if (migrate_background_snapshot()) {
4419 qemu_thread_create(&s->thread, "bg_snapshot",
4420 bg_migration_thread, s, QEMU_THREAD_JOINABLE);
4421 } else {
4422 qemu_thread_create(&s->thread, "live_migration",
4423 migration_thread, s, QEMU_THREAD_JOINABLE);
4424 }
4425 s->migration_thread_running = true;
4426}
4427
4428void migration_global_dump(Monitor *mon)
4429{
4430 MigrationState *ms = migrate_get_current();
4431
4432 monitor_printf(mon, "globals:\n");
4433 monitor_printf(mon, "store-global-state: %s\n",
4434 ms->store_global_state ? "on" : "off");
4435 monitor_printf(mon, "only-migratable: %s\n",
4436 only_migratable ? "on" : "off");
4437 monitor_printf(mon, "send-configuration: %s\n",
4438 ms->send_configuration ? "on" : "off");
4439 monitor_printf(mon, "send-section-footer: %s\n",
4440 ms->send_section_footer ? "on" : "off");
4441 monitor_printf(mon, "decompress-error-check: %s\n",
4442 ms->decompress_error_check ? "on" : "off");
4443 monitor_printf(mon, "clear-bitmap-shift: %u\n",
4444 ms->clear_bitmap_shift);
4445}
4446
4447#define DEFINE_PROP_MIG_CAP(name, x) \
4448 DEFINE_PROP_BOOL(name, MigrationState, enabled_capabilities[x], false)
4449
4450static Property migration_properties[] = {
4451 DEFINE_PROP_BOOL("store-global-state", MigrationState,
4452 store_global_state, true),
4453 DEFINE_PROP_BOOL("send-configuration", MigrationState,
4454 send_configuration, true),
4455 DEFINE_PROP_BOOL("send-section-footer", MigrationState,
4456 send_section_footer, true),
4457 DEFINE_PROP_BOOL("decompress-error-check", MigrationState,
4458 decompress_error_check, true),
4459 DEFINE_PROP_UINT8("x-clear-bitmap-shift", MigrationState,
4460 clear_bitmap_shift, CLEAR_BITMAP_SHIFT_DEFAULT),
4461 DEFINE_PROP_BOOL("x-preempt-pre-7-2", MigrationState,
4462 preempt_pre_7_2, false),
4463
4464
4465 DEFINE_PROP_UINT8("x-compress-level", MigrationState,
4466 parameters.compress_level,
4467 DEFAULT_MIGRATE_COMPRESS_LEVEL),
4468 DEFINE_PROP_UINT8("x-compress-threads", MigrationState,
4469 parameters.compress_threads,
4470 DEFAULT_MIGRATE_COMPRESS_THREAD_COUNT),
4471 DEFINE_PROP_BOOL("x-compress-wait-thread", MigrationState,
4472 parameters.compress_wait_thread, true),
4473 DEFINE_PROP_UINT8("x-decompress-threads", MigrationState,
4474 parameters.decompress_threads,
4475 DEFAULT_MIGRATE_DECOMPRESS_THREAD_COUNT),
4476 DEFINE_PROP_UINT8("x-throttle-trigger-threshold", MigrationState,
4477 parameters.throttle_trigger_threshold,
4478 DEFAULT_MIGRATE_THROTTLE_TRIGGER_THRESHOLD),
4479 DEFINE_PROP_UINT8("x-cpu-throttle-initial", MigrationState,
4480 parameters.cpu_throttle_initial,
4481 DEFAULT_MIGRATE_CPU_THROTTLE_INITIAL),
4482 DEFINE_PROP_UINT8("x-cpu-throttle-increment", MigrationState,
4483 parameters.cpu_throttle_increment,
4484 DEFAULT_MIGRATE_CPU_THROTTLE_INCREMENT),
4485 DEFINE_PROP_BOOL("x-cpu-throttle-tailslow", MigrationState,
4486 parameters.cpu_throttle_tailslow, false),
4487 DEFINE_PROP_SIZE("x-max-bandwidth", MigrationState,
4488 parameters.max_bandwidth, MAX_THROTTLE),
4489 DEFINE_PROP_UINT64("x-downtime-limit", MigrationState,
4490 parameters.downtime_limit,
4491 DEFAULT_MIGRATE_SET_DOWNTIME),
4492 DEFINE_PROP_UINT32("x-checkpoint-delay", MigrationState,
4493 parameters.x_checkpoint_delay,
4494 DEFAULT_MIGRATE_X_CHECKPOINT_DELAY),
4495 DEFINE_PROP_UINT8("multifd-channels", MigrationState,
4496 parameters.multifd_channels,
4497 DEFAULT_MIGRATE_MULTIFD_CHANNELS),
4498 DEFINE_PROP_MULTIFD_COMPRESSION("multifd-compression", MigrationState,
4499 parameters.multifd_compression,
4500 DEFAULT_MIGRATE_MULTIFD_COMPRESSION),
4501 DEFINE_PROP_UINT8("multifd-zlib-level", MigrationState,
4502 parameters.multifd_zlib_level,
4503 DEFAULT_MIGRATE_MULTIFD_ZLIB_LEVEL),
4504 DEFINE_PROP_UINT8("multifd-zstd-level", MigrationState,
4505 parameters.multifd_zstd_level,
4506 DEFAULT_MIGRATE_MULTIFD_ZSTD_LEVEL),
4507 DEFINE_PROP_SIZE("xbzrle-cache-size", MigrationState,
4508 parameters.xbzrle_cache_size,
4509 DEFAULT_MIGRATE_XBZRLE_CACHE_SIZE),
4510 DEFINE_PROP_SIZE("max-postcopy-bandwidth", MigrationState,
4511 parameters.max_postcopy_bandwidth,
4512 DEFAULT_MIGRATE_MAX_POSTCOPY_BANDWIDTH),
4513 DEFINE_PROP_UINT8("max-cpu-throttle", MigrationState,
4514 parameters.max_cpu_throttle,
4515 DEFAULT_MIGRATE_MAX_CPU_THROTTLE),
4516 DEFINE_PROP_SIZE("announce-initial", MigrationState,
4517 parameters.announce_initial,
4518 DEFAULT_MIGRATE_ANNOUNCE_INITIAL),
4519 DEFINE_PROP_SIZE("announce-max", MigrationState,
4520 parameters.announce_max,
4521 DEFAULT_MIGRATE_ANNOUNCE_MAX),
4522 DEFINE_PROP_SIZE("announce-rounds", MigrationState,
4523 parameters.announce_rounds,
4524 DEFAULT_MIGRATE_ANNOUNCE_ROUNDS),
4525 DEFINE_PROP_SIZE("announce-step", MigrationState,
4526 parameters.announce_step,
4527 DEFAULT_MIGRATE_ANNOUNCE_STEP),
4528 DEFINE_PROP_STRING("tls-creds", MigrationState, parameters.tls_creds),
4529 DEFINE_PROP_STRING("tls-hostname", MigrationState, parameters.tls_hostname),
4530 DEFINE_PROP_STRING("tls-authz", MigrationState, parameters.tls_authz),
4531
4532
    DEFINE_PROP_MIG_CAP("x-xbzrle", MIGRATION_CAPABILITY_XBZRLE),
    DEFINE_PROP_MIG_CAP("x-rdma-pin-all", MIGRATION_CAPABILITY_RDMA_PIN_ALL),
    DEFINE_PROP_MIG_CAP("x-auto-converge", MIGRATION_CAPABILITY_AUTO_CONVERGE),
    DEFINE_PROP_MIG_CAP("x-zero-blocks", MIGRATION_CAPABILITY_ZERO_BLOCKS),
    DEFINE_PROP_MIG_CAP("x-compress", MIGRATION_CAPABILITY_COMPRESS),
    DEFINE_PROP_MIG_CAP("x-events", MIGRATION_CAPABILITY_EVENTS),
    DEFINE_PROP_MIG_CAP("x-postcopy-ram", MIGRATION_CAPABILITY_POSTCOPY_RAM),
    DEFINE_PROP_MIG_CAP("x-postcopy-preempt",
                        MIGRATION_CAPABILITY_POSTCOPY_PREEMPT),
    DEFINE_PROP_MIG_CAP("x-colo", MIGRATION_CAPABILITY_X_COLO),
    DEFINE_PROP_MIG_CAP("x-release-ram", MIGRATION_CAPABILITY_RELEASE_RAM),
    DEFINE_PROP_MIG_CAP("x-block", MIGRATION_CAPABILITY_BLOCK),
    DEFINE_PROP_MIG_CAP("x-return-path", MIGRATION_CAPABILITY_RETURN_PATH),
    DEFINE_PROP_MIG_CAP("x-multifd", MIGRATION_CAPABILITY_MULTIFD),
    DEFINE_PROP_MIG_CAP("x-background-snapshot",
                        MIGRATION_CAPABILITY_BACKGROUND_SNAPSHOT),
#ifdef CONFIG_LINUX
    DEFINE_PROP_MIG_CAP("x-zero-copy-send",
                        MIGRATION_CAPABILITY_ZERO_COPY_SEND),
#endif

    DEFINE_PROP_END_OF_LIST(),
};

static void migration_class_init(ObjectClass *klass, void *data)
{
    DeviceClass *dc = DEVICE_CLASS(klass);

    dc->user_creatable = false;
    device_class_set_props(dc, migration_properties);
}

static void migration_instance_finalize(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);

    qemu_mutex_destroy(&ms->error_mutex);
    qemu_mutex_destroy(&ms->qemu_file_lock);
    qemu_sem_destroy(&ms->wait_unplug_sem);
    qemu_sem_destroy(&ms->rate_limit_sem);
    qemu_sem_destroy(&ms->pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_sem);
    qemu_sem_destroy(&ms->postcopy_pause_rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_sem);
    qemu_sem_destroy(&ms->rp_state.rp_pong_acks);
    qemu_sem_destroy(&ms->postcopy_qemufile_src_sem);
    error_free(ms->error);
}

static void migration_instance_init(Object *obj)
{
    MigrationState *ms = MIGRATION_OBJ(obj);
    MigrationParameters *params = &ms->parameters;

    ms->state = MIGRATION_STATUS_NONE;
    ms->mbps = -1;
    ms->pages_per_second = -1;
    qemu_sem_init(&ms->pause_sem, 0);
    qemu_mutex_init(&ms->error_mutex);

    params->tls_hostname = g_strdup("");
    params->tls_creds = g_strdup("");

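    /* Set has_* up only for parameter checks */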
    params->has_compress_level = true;
    params->has_compress_threads = true;
    params->has_compress_wait_thread = true;
    params->has_decompress_threads = true;
    params->has_throttle_trigger_threshold = true;
    params->has_cpu_throttle_initial = true;
    params->has_cpu_throttle_increment = true;
    params->has_cpu_throttle_tailslow = true;
    params->has_max_bandwidth = true;
    params->has_downtime_limit = true;
    params->has_x_checkpoint_delay = true;
    params->has_block_incremental = true;
    params->has_multifd_channels = true;
    params->has_multifd_compression = true;
    params->has_multifd_zlib_level = true;
    params->has_multifd_zstd_level = true;
    params->has_xbzrle_cache_size = true;
    params->has_max_postcopy_bandwidth = true;
    params->has_max_cpu_throttle = true;
    params->has_announce_initial = true;
    params->has_announce_max = true;
    params->has_announce_rounds = true;
    params->has_announce_step = true;

    qemu_sem_init(&ms->postcopy_pause_sem, 0);
    qemu_sem_init(&ms->postcopy_pause_rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_sem, 0);
    qemu_sem_init(&ms->rp_state.rp_pong_acks, 0);
    qemu_sem_init(&ms->rate_limit_sem, 0);
    qemu_sem_init(&ms->wait_unplug_sem, 0);
    qemu_sem_init(&ms->postcopy_qemufile_src_sem, 0);
    qemu_mutex_init(&ms->qemu_file_lock);
}

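/*
 * Return true if the parameter/capability checks pass, false otherwise.
 * On failure an error is set in errp (when provided).
 */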
static bool migration_object_check(MigrationState *ms, Error **errp)
{
    MigrationCapabilityStatusList *head = NULL;
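    /* Start with every capability assumed off */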
    bool cap_list[MIGRATION_CAPABILITY__MAX] = { 0 }, ret;
    int i;

    if (!migrate_params_check(&ms->parameters, errp)) {
        return false;
    }

    for (i = 0; i < MIGRATION_CAPABILITY__MAX; i++) {
        if (ms->enabled_capabilities[i]) {
            QAPI_LIST_PREPEND(head, migrate_cap_add(i, true));
        }
    }

    ret = migrate_caps_check(cap_list, head, errp);

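    /* Safe even if head is still NULL */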
    qapi_free_MigrationCapabilityStatusList(head);

    return ret;
}

static const TypeInfo migration_type = {
    .name = TYPE_MIGRATION,
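    /*
     * NOTE: the migration object is not really a device: it is never
     * created with qdev_new(), never attached to the qdev device tree
     * and never realized.  TYPE_DEVICE is used as the parent so that
     * its properties can be set with "-global migration.xxx=yyy".
     */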
    .parent = TYPE_DEVICE,
    .class_init = migration_class_init,
    .class_size = sizeof(MigrationClass),
    .instance_size = sizeof(MigrationState),
    .instance_init = migration_instance_init,
    .instance_finalize = migration_instance_finalize,
};

static void register_migration_types(void)
{
    type_register_static(&migration_type);
}

type_init(register_migration_types);