1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28#include "qemu/osdep.h"
29#include "qemu-common.h"
30#include "cpu.h"
31#include <zlib.h>
32#include "qapi-event.h"
33#include "qemu/cutils.h"
34#include "qemu/bitops.h"
35#include "qemu/bitmap.h"
36#include "qemu/timer.h"
37#include "qemu/main-loop.h"
38#include "migration/migration.h"
39#include "migration/postcopy-ram.h"
40#include "exec/address-spaces.h"
41#include "migration/page_cache.h"
42#include "qemu/error-report.h"
43#include "trace.h"
44#include "exec/ram_addr.h"
45#include "qemu/rcu_queue.h"
46#include "migration/colo.h"
47
/* Debug output: compiled in only when DEBUG_MIGRATION_RAM is defined;
 * otherwise expands to nothing. */
#ifdef DEBUG_MIGRATION_RAM
#define DPRINTF(fmt, ...) \
    do { fprintf(stdout, "migration_ram: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

/* Count of consecutive bitmap-sync periods in which dirtying outpaced
 * transfer; used by migration_bitmap_sync() to decide when to throttle. */
static int dirty_rate_high_cnt;

/* Incremented on every dirty-bitmap sync; doubles as the generation
 * number for entries in the XBZRLE page cache. */
static uint64_t bitmap_sync_count;

/* Flags OR'ed into the page offset on the wire.  The page-aligned low
 * bits of the offset are free for this use. */
#define RAM_SAVE_FLAG_FULL     0x01
#define RAM_SAVE_FLAG_COMPRESS 0x02  /* page is a single repeated byte */
#define RAM_SAVE_FLAG_MEM_SIZE 0x04
#define RAM_SAVE_FLAG_PAGE     0x08  /* raw page data follows */
#define RAM_SAVE_FLAG_EOS      0x10  /* end of section */
#define RAM_SAVE_FLAG_CONTINUE 0x20  /* same RAMBlock as the previous page */
#define RAM_SAVE_FLAG_XBZRLE   0x40

#define RAM_SAVE_FLAG_COMPRESS_PAGE    0x100  /* zlib-compressed page */

/* A page of zero bytes, used to prime the XBZRLE cache for zero pages. */
static uint8_t *ZERO_TARGET_PAGE;

/* Return true if every byte in [p, p + size) is zero. */
static inline bool is_zero_range(uint8_t *p, uint64_t size)
{
    return buffer_is_zero(p, size);
}

/* State for the XBZRLE delta-compression path.  cache is protected by
 * lock (see XBZRLE_cache_lock/unlock). */
static struct {
    /* buffer that holds the XBZRLE-encoded output */
    uint8_t *encoded_buf;
    /* scratch copy of the page being encoded */
    uint8_t *current_buf;
    /* cache of previously-sent page contents, keyed by address */
    PageCache *cache;
    QemuMutex lock;
} XBZRLE;

/* buffer used on the destination side for XBZRLE decoding */
static uint8_t *xbzrle_decoded_buf;
94
95static void XBZRLE_cache_lock(void)
96{
97 if (migrate_use_xbzrle())
98 qemu_mutex_lock(&XBZRLE.lock);
99}
100
101static void XBZRLE_cache_unlock(void)
102{
103 if (migrate_use_xbzrle())
104 qemu_mutex_unlock(&XBZRLE.lock);
105}
106
107
108
109
110
111
112
/*
 * Resize the XBZRLE page cache to @new_size bytes (rounded down to a
 * power of two).  Returns the new cache size on success, or -1 on error
 * (size below one target page, or allocation failure).
 *
 * If the rounded size equals the current size, the existing cache is
 * kept.  Resizing replaces the cache wholesale, so previously cached
 * page contents are lost.
 */
int64_t xbzrle_cache_resize(int64_t new_size)
{
    PageCache *new_cache;
    int64_t ret;

    if (new_size < TARGET_PAGE_SIZE) {
        return -1;
    }

    XBZRLE_cache_lock();

    if (XBZRLE.cache != NULL) {
        /* No-op if the effective (power-of-two) size is unchanged. */
        if (pow2floor(new_size) == migrate_xbzrle_cache_size()) {
            goto out_new_size;
        }
        new_cache = cache_init(new_size / TARGET_PAGE_SIZE,
                               TARGET_PAGE_SIZE);
        if (!new_cache) {
            error_report("Error creating cache");
            ret = -1;
            goto out;
        }

        /* Swap in the new cache; old contents are discarded. */
        cache_fini(XBZRLE.cache);
        XBZRLE.cache = new_cache;
    }

out_new_size:
    ret = pow2floor(new_size);
out:
    XBZRLE_cache_unlock();
    return ret;
}
146
147
/* Transfer statistics accumulated over a migration run, reported via
 * the *_mig_*_transferred() accessors below. */
typedef struct AccountingInfo {
    uint64_t dup_pages;        /* zero (duplicate-byte) pages sent */
    uint64_t skipped_pages;    /* pages skipped entirely */
    uint64_t norm_pages;       /* full (normal) pages sent */
    uint64_t iterations;       /* dirty-page search iterations */
    uint64_t xbzrle_bytes;     /* bytes sent via XBZRLE encoding */
    uint64_t xbzrle_pages;     /* pages sent via XBZRLE encoding */
    uint64_t xbzrle_cache_miss;       /* XBZRLE cache misses */
    double xbzrle_cache_miss_rate;    /* misses per iteration, last period */
    uint64_t xbzrle_overflows; /* encodings that exceeded a page */
} AccountingInfo;

static AccountingInfo acct_info;

/* Reset all migration transfer statistics to zero. */
static void acct_clear(void)
{
    memset(&acct_info, 0, sizeof(acct_info));
}
166
/* Read-only accessors for the statistics above; exported for the
 * "info migrate" / query-migrate reporting paths. */

/* Bytes accounted to zero pages (one target page each). */
uint64_t dup_mig_bytes_transferred(void)
{
    return acct_info.dup_pages * TARGET_PAGE_SIZE;
}

uint64_t dup_mig_pages_transferred(void)
{
    return acct_info.dup_pages;
}

/* Bytes accounted to skipped pages (one target page each). */
uint64_t skipped_mig_bytes_transferred(void)
{
    return acct_info.skipped_pages * TARGET_PAGE_SIZE;
}

uint64_t skipped_mig_pages_transferred(void)
{
    return acct_info.skipped_pages;
}

/* Bytes accounted to normal (full) pages (one target page each). */
uint64_t norm_mig_bytes_transferred(void)
{
    return acct_info.norm_pages * TARGET_PAGE_SIZE;
}

uint64_t norm_mig_pages_transferred(void)
{
    return acct_info.norm_pages;
}

uint64_t xbzrle_mig_bytes_transferred(void)
{
    return acct_info.xbzrle_bytes;
}

uint64_t xbzrle_mig_pages_transferred(void)
{
    return acct_info.xbzrle_pages;
}

uint64_t xbzrle_mig_pages_cache_miss(void)
{
    return acct_info.xbzrle_cache_miss;
}

double xbzrle_mig_cache_miss_rate(void)
{
    return acct_info.xbzrle_cache_miss_rate;
}

uint64_t xbzrle_mig_pages_overflow(void)
{
    return acct_info.xbzrle_overflows;
}
221
222
223
/* Where the dirty-page search last stopped, so the next call resumes
 * from the same place rather than rescanning from the start. */
static RAMBlock *last_seen_block;
/* Last block a page was actually sent from; used to decide whether the
 * RAM_SAVE_FLAG_CONTINUE optimisation (omit the block name) applies. */
static RAMBlock *last_sent_block;
static ram_addr_t last_offset;
/* Protects migration_bitmap_rcu against concurrent resize (see
 * migration_bitmap_extend()). */
static QemuMutex migration_bitmap_mutex;
/* Number of dirty target pages still to be sent. */
static uint64_t migration_dirty_pages;
static uint32_t last_version;
/* True during the first full pass over RAM, when every page is dirty
 * by definition and the bitmap need not be consulted. */
static bool ram_bulk_stage;

/* Cursor state for the dirty-page search. */
struct PageSearchStatus {
    /* Current block being searched */
    RAMBlock    *block;
    /* Current offset to search from */
    ram_addr_t   offset;
    /* Set once we wrap around */
    bool         complete_round;
};
typedef struct PageSearchStatus PageSearchStatus;

/* Dirty/unsent bitmaps, published via RCU so readers need no lock;
 * replaced wholesale on resize (migration_bitmap_extend). */
static struct BitmapRcu {
    struct rcu_head rcu;
    /* Main migration bitmap: one bit per target page, set = dirty. */
    unsigned long *bmap;
    /* bitmap of pages that haven't been sent even once
     * only maintained and used in postcopy at the moment
     * where it's used to send the dirtymap at the start
     * of the postcopy phase
     */
    unsigned long *unsentmap;
} *migration_bitmap_rcu;
255
/* Per-worker state for a compression thread; handed work via block/
 * offset under @mutex, results collected from @file. */
struct CompressParam {
    bool done;          /* worker idle, output ready to collect */
    bool quit;          /* ask the worker to exit */
    QEMUFile *file;     /* in-memory file the worker compresses into */
    QemuMutex mutex;
    QemuCond cond;
    RAMBlock *block;    /* non-NULL: page waiting to be compressed */
    ram_addr_t offset;
};
typedef struct CompressParam CompressParam;

/* Per-worker state for a decompression thread on the destination. */
struct DecompressParam {
    bool done;
    bool quit;
    QemuMutex mutex;
    QemuCond cond;
    void *des;          /* destination host address for the page */
    uint8_t *compbuf;   /* compressed input buffer */
    int len;            /* length of compressed data in compbuf */
};
typedef struct DecompressParam DecompressParam;

static CompressParam *comp_param;
static QemuThread *compress_threads;

/* comp_done_cond is broadcast when any worker finishes; waiters hold
 * comp_done_lock and re-check the per-worker done flags. */
static QemuMutex comp_done_lock;
static QemuCond comp_done_cond;

/* Ops-less QEMUFile used as an in-memory buffer by the workers. */
static const QEMUFileOps empty_ops = { };

/* False once XBZRLE takes over after the bulk stage (the two paths are
 * mutually exclusive; see find_dirty_block()). */
static bool compression_switch;
static DecompressParam *decomp_param;
static QemuThread *decompress_threads;
static QemuMutex decomp_done_lock;
static QemuCond decomp_done_cond;

static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset);
297
298static void *do_data_compress(void *opaque)
299{
300 CompressParam *param = opaque;
301 RAMBlock *block;
302 ram_addr_t offset;
303
304 qemu_mutex_lock(¶m->mutex);
305 while (!param->quit) {
306 if (param->block) {
307 block = param->block;
308 offset = param->offset;
309 param->block = NULL;
310 qemu_mutex_unlock(¶m->mutex);
311
312 do_compress_ram_page(param->file, block, offset);
313
314 qemu_mutex_lock(&comp_done_lock);
315 param->done = true;
316 qemu_cond_signal(&comp_done_cond);
317 qemu_mutex_unlock(&comp_done_lock);
318
319 qemu_mutex_lock(¶m->mutex);
320 } else {
321 qemu_cond_wait(¶m->cond, ¶m->mutex);
322 }
323 }
324 qemu_mutex_unlock(¶m->mutex);
325
326 return NULL;
327}
328
329static inline void terminate_compression_threads(void)
330{
331 int idx, thread_count;
332
333 thread_count = migrate_compress_threads();
334 for (idx = 0; idx < thread_count; idx++) {
335 qemu_mutex_lock(&comp_param[idx].mutex);
336 comp_param[idx].quit = true;
337 qemu_cond_signal(&comp_param[idx].cond);
338 qemu_mutex_unlock(&comp_param[idx].mutex);
339 }
340}
341
/*
 * Stop and reap all compression workers and free their state.  Safe to
 * call when compression is disabled (returns immediately).  Leaves
 * compress_threads/comp_param NULL so a later create starts clean.
 */
void migrate_compress_threads_join(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    /* Flag every worker to quit, then join them one by one. */
    terminate_compression_threads();
    thread_count = migrate_compress_threads();
    for (i = 0; i < thread_count; i++) {
        qemu_thread_join(compress_threads + i);
        qemu_fclose(comp_param[i].file);
        qemu_mutex_destroy(&comp_param[i].mutex);
        qemu_cond_destroy(&comp_param[i].cond);
    }
    qemu_mutex_destroy(&comp_done_lock);
    qemu_cond_destroy(&comp_done_cond);
    g_free(compress_threads);
    g_free(comp_param);
    compress_threads = NULL;
    comp_param = NULL;
}
364
/*
 * Allocate and start the compression worker threads.  No-op when
 * compression is disabled.  Each worker gets an ops-less QEMUFile that
 * acts as an in-memory buffer for its compressed output.
 */
void migrate_compress_threads_create(void)
{
    int i, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    compression_switch = true;
    thread_count = migrate_compress_threads();
    compress_threads = g_new0(QemuThread, thread_count);
    comp_param = g_new0(CompressParam, thread_count);
    qemu_cond_init(&comp_done_cond);
    qemu_mutex_init(&comp_done_lock);
    for (i = 0; i < thread_count; i++) {
        /* comp_param[i].file is just used as a dummy buffer to save data,
         * set its ops to empty.
         */
        comp_param[i].file = qemu_fopen_ops(NULL, &empty_ops);
        /* Workers start idle ("done") so the first dispatch finds them. */
        comp_param[i].done = true;
        comp_param[i].quit = false;
        qemu_mutex_init(&comp_param[i].mutex);
        qemu_cond_init(&comp_param[i].cond);
        qemu_thread_create(compress_threads + i, "compress",
                           do_data_compress, comp_param + i,
                           QEMU_THREAD_JOINABLE);
    }
}
392
393
394
395
396
397
398
399
400
401
402
403
404
/*
 * Write the on-wire header for a RAM page: the offset (with its flag
 * bits already OR'ed in by the caller) and, unless the
 * RAM_SAVE_FLAG_CONTINUE flag says the block is unchanged, the
 * RAMBlock's idstr length and string.
 *
 * Returns the number of bytes written.
 */
static size_t save_page_header(QEMUFile *f, RAMBlock *block, ram_addr_t offset)
{
    size_t size, len;

    qemu_put_be64(f, offset);
    size = 8;

    if (!(offset & RAM_SAVE_FLAG_CONTINUE)) {
        /* New block: send its name so the destination can look it up. */
        len = strlen(block->idstr);
        qemu_put_byte(f, len);
        qemu_put_buffer(f, (uint8_t *)block->idstr, len);
        size += 1 + len;
    }
    return size;
}
420
421
422
423
424
425
426
427static void mig_throttle_guest_down(void)
428{
429 MigrationState *s = migrate_get_current();
430 uint64_t pct_initial = s->parameters.cpu_throttle_initial;
431 uint64_t pct_icrement = s->parameters.cpu_throttle_increment;
432
433
434 if (!cpu_throttle_active()) {
435 cpu_throttle_set(pct_initial);
436 } else {
437
438 cpu_throttle_set(cpu_throttle_get_percentage() + pct_icrement);
439 }
440}
441
442
443
444
445
446
447
/*
 * Record in the XBZRLE cache that the page at @current_addr was sent as
 * a zero page, so a later small modification can be XBZRLE-encoded
 * against a page of zeroes.  Skipped during the bulk stage (everything
 * is "dirty" then, so caching would only thrash) and when XBZRLE is off.
 */
static void xbzrle_cache_zero_page(ram_addr_t current_addr)
{
    if (ram_bulk_stage || !migrate_use_xbzrle()) {
        return;
    }

    /* We don't care if this fails to allocate a new cache page
     * as long as it updated an old one */
    cache_insert(XBZRLE.cache, current_addr, ZERO_TARGET_PAGE,
                 bitmap_sync_count);
}
459
460#define ENCODING_FLAG_XBZRLE 0x1
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
/**
 * save_xbzrle_page: compress and send the current page using XBZRLE
 * delta encoding against the cached copy of the same address.
 *
 * Returns: 1 if the page was sent, 0 if the page was unmodified
 *          (nothing to send), -1 on cache miss or encoding overflow
 *          (caller must send the page some other way).
 *
 * @f: QEMUFile where to send the data
 * @current_data: in/out; on cache insert or overflow it is redirected
 *                to the cache's copy so the caller transmits stable
 *                data even if the guest dirties the page concurrently
 * @current_addr: absolute address of the page
 * @block/@offset: page location; @last_stage: final migration pass
 * @bytes_transferred: incremented by the bytes written here
 */
static int save_xbzrle_page(QEMUFile *f, uint8_t **current_data,
                            ram_addr_t current_addr, RAMBlock *block,
                            ram_addr_t offset, bool last_stage,
                            uint64_t *bytes_transferred)
{
    int encoded_len = 0, bytes_xbzrle;
    uint8_t *prev_cached_page;

    if (!cache_is_cached(XBZRLE.cache, current_addr, bitmap_sync_count)) {
        acct_info.xbzrle_cache_miss++;
        if (!last_stage) {
            if (cache_insert(XBZRLE.cache, current_addr, *current_data,
                             bitmap_sync_count) == -1) {
                return -1;
            } else {
                /* update *current_data when the page has been
                   inserted into cache */
                *current_data = get_cached_data(XBZRLE.cache, current_addr);
            }
        }
        return -1;
    }

    prev_cached_page = get_cached_data(XBZRLE.cache, current_addr);

    /* save current buffer into memory */
    memcpy(XBZRLE.current_buf, *current_data, TARGET_PAGE_SIZE);

    /* XBZRLE encoding (if there is no overflow) */
    encoded_len = xbzrle_encode_buffer(prev_cached_page, XBZRLE.current_buf,
                                       TARGET_PAGE_SIZE, XBZRLE.encoded_buf,
                                       TARGET_PAGE_SIZE);
    if (encoded_len == 0) {
        DPRINTF("Skipping unmodified page\n");
        return 0;
    } else if (encoded_len == -1) {
        /* Delta grew past a page: fall back to sending the raw page. */
        DPRINTF("Overflow\n");
        acct_info.xbzrle_overflows++;
        /* update data in the cache */
        if (!last_stage) {
            memcpy(prev_cached_page, *current_data, TARGET_PAGE_SIZE);
            *current_data = prev_cached_page;
        }
        return -1;
    }

    /* we need to update the data in the cache, in order to get the same data */
    if (!last_stage) {
        memcpy(prev_cached_page, XBZRLE.current_buf, TARGET_PAGE_SIZE);
    }

    /* Send XBZRLE based compressed page */
    bytes_xbzrle = save_page_header(f, block, offset | RAM_SAVE_FLAG_XBZRLE);
    qemu_put_byte(f, ENCODING_FLAG_XBZRLE);
    qemu_put_be16(f, encoded_len);
    qemu_put_buffer(f, XBZRLE.encoded_buf, encoded_len);
    /* +1 for the encoding-flag byte, +2 for the be16 length. */
    bytes_xbzrle += encoded_len + 1 + 2;
    acct_info.xbzrle_pages++;
    acct_info.xbzrle_bytes += bytes_xbzrle;
    *bytes_transferred += bytes_xbzrle;

    return 1;
}
540
541
542
543
544
545
546
547
548
/*
 * Find the next dirty page at or after @start within RAMBlock @rb.
 * Returns the offset of that page relative to the block's start and
 * stores the absolute (bitmap) address in *ram_addr_abs.  If nothing is
 * dirty, the returned offset is >= rb->used_length (caller checks).
 *
 * During the bulk stage every page is dirty, so for any page past the
 * block start we can answer "the next page" without touching the
 * bitmap.
 */
static inline
ram_addr_t migration_bitmap_find_dirty(RAMBlock *rb,
                                       ram_addr_t start,
                                       ram_addr_t *ram_addr_abs)
{
    unsigned long base = rb->offset >> TARGET_PAGE_BITS;
    unsigned long nr = base + (start >> TARGET_PAGE_BITS);
    uint64_t rb_size = rb->used_length;
    unsigned long size = base + (rb_size >> TARGET_PAGE_BITS);
    unsigned long *bitmap;

    unsigned long next;

    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    if (ram_bulk_stage && nr > base) {
        /* Bulk stage: everything is dirty, next page follows directly. */
        next = nr + 1;
    } else {
        next = find_next_bit(bitmap, size, nr);
    }

    *ram_addr_abs = next << TARGET_PAGE_BITS;
    return (next - base) << TARGET_PAGE_BITS;
}
572
/*
 * Atomically clear the dirty bit for the page at absolute address
 * @addr.  Returns true if the bit was set (and decrements the global
 * dirty-page count); false if the page was already clean.
 */
static inline bool migration_bitmap_clear_dirty(ram_addr_t addr)
{
    bool ret;
    int nr = addr >> TARGET_PAGE_BITS;
    unsigned long *bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;

    ret = test_and_clear_bit(nr, bitmap);

    if (ret) {
        migration_dirty_pages--;
    }
    return ret;
}

/*
 * Pull dirty bits for [start, start + length) from the memory core's
 * dirty tracking into the migration bitmap, adding any newly dirtied
 * pages to the global count.
 */
static void migration_bitmap_sync_range(ram_addr_t start, ram_addr_t length)
{
    unsigned long *bitmap;
    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    migration_dirty_pages +=
        cpu_physical_memory_sync_dirty_bitmap(bitmap, start, length);
}
594
595
/* Rolling state for the once-per-second rate calculations done in
 * migration_bitmap_sync(). */
static int64_t start_time;              /* start of the current period (ms) */
static int64_t bytes_xfer_prev;         /* bytes transferred at period start */
static int64_t num_dirty_pages_period;  /* pages dirtied this period */
static uint64_t xbzrle_cache_miss_prev; /* cache misses at period start */
static uint64_t iterations_prev;        /* iterations at period start */

/* Reset the per-period rate state before a new migration run. */
static void migration_bitmap_sync_init(void)
{
    start_time = 0;
    bytes_xfer_prev = 0;
    num_dirty_pages_period = 0;
    xbzrle_cache_miss_prev = 0;
    iterations_prev = 0;
}
610
/*
 * Re-sync the migration dirty bitmap from the memory core for every
 * RAMBlock, then (at most once per second) update the dirty-page rate,
 * the XBZRLE cache-miss rate, and decide whether to throttle the guest
 * (auto-converge).  Also bumps bitmap_sync_count, which generation-tags
 * the XBZRLE cache.
 *
 * Lock order: migration_bitmap_mutex outside, RCU read lock inside —
 * the bitmap pointer must not be replaced (resize) during the sync.
 */
static void migration_bitmap_sync(void)
{
    RAMBlock *block;
    uint64_t num_dirty_pages_init = migration_dirty_pages;
    MigrationState *s = migrate_get_current();
    int64_t end_time;
    int64_t bytes_xfer_now;

    bitmap_sync_count++;

    if (!bytes_xfer_prev) {
        bytes_xfer_prev = ram_bytes_transferred();
    }

    if (!start_time) {
        start_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);
    }

    trace_migration_bitmap_sync_start();
    memory_global_dirty_log_sync();

    qemu_mutex_lock(&migration_bitmap_mutex);
    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
        migration_bitmap_sync_range(block->offset, block->used_length);
    }
    rcu_read_unlock();
    qemu_mutex_unlock(&migration_bitmap_mutex);

    trace_migration_bitmap_sync_end(migration_dirty_pages
                                    - num_dirty_pages_init);
    num_dirty_pages_period += migration_dirty_pages - num_dirty_pages_init;
    end_time = qemu_clock_get_ms(QEMU_CLOCK_REALTIME);

    /* more than 1 second = 1000 millisecons */
    if (end_time > start_time + 1000) {
        if (migrate_auto_converge()) {
            /* The following detection logic can be refined later. For now:
               Check to see if the dirtied bytes is 50% more than the approx.
               amount of bytes that just got transferred since the last time we
               were in this routine. If that happens twice, start or increase
               throttling */
            bytes_xfer_now = ram_bytes_transferred();

            if (s->dirty_pages_rate &&
               (num_dirty_pages_period * TARGET_PAGE_SIZE >
                   (bytes_xfer_now - bytes_xfer_prev)/2) &&
               (dirty_rate_high_cnt++ >= 2)) {
                    trace_migration_throttle();
                    dirty_rate_high_cnt = 0;
                    mig_throttle_guest_down();
             }
             bytes_xfer_prev = bytes_xfer_now;
        }

        if (migrate_use_xbzrle()) {
            /* Misses per search iteration since the previous period. */
            if (iterations_prev != acct_info.iterations) {
                acct_info.xbzrle_cache_miss_rate =
                   (double)(acct_info.xbzrle_cache_miss -
                            xbzrle_cache_miss_prev) /
                   (acct_info.iterations - iterations_prev);
            }
            iterations_prev = acct_info.iterations;
            xbzrle_cache_miss_prev = acct_info.xbzrle_cache_miss;
        }
        s->dirty_pages_rate = num_dirty_pages_period * 1000
            / (end_time - start_time);
        s->dirty_bytes_rate = s->dirty_pages_rate * TARGET_PAGE_SIZE;
        start_time = end_time;
        num_dirty_pages_period = 0;
    }
    s->dirty_sync_count = bitmap_sync_count;
    if (migrate_use_events()) {
        /* Let management know a new migration pass has started. */
        qapi_event_send_migration_pass(bitmap_sync_count, NULL);
    }
}
687
688
689
690
691
692
693
694
695
696
697
698
/**
 * save_zero_page: send the zero page to the stream
 *
 * Returns: Number of pages written (1) if the page was all zeroes,
 *          -1 if it was not and the caller must send it another way.
 *
 * @f: QEMUFile where to send the data
 * @block: block that contains the page we want to send
 * @offset: offset inside the block for the page
 * @p: pointer to the page
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int save_zero_page(QEMUFile *f, RAMBlock *block, ram_addr_t offset,
                          uint8_t *p, uint64_t *bytes_transferred)
{
    int pages = -1;

    if (is_zero_range(p, TARGET_PAGE_SIZE)) {
        acct_info.dup_pages++;
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_COMPRESS);
        /* The single byte after the header is the fill byte (zero). */
        qemu_put_byte(f, 0);
        *bytes_transferred += 1;
        pages = 1;
    }

    return pages;
}
715
716
717
718
719
720
721
722
723
724
725
726
727
728
729
/**
 * ram_save_page: send the given page to the stream
 *
 * Tries, in order: the RDMA/control-path hook, the zero-page test,
 * XBZRLE (when past the bulk stage and not in postcopy), and finally a
 * raw page write.
 *
 * Returns: Number of pages written (>=0), or a negative error value.
 *
 * @f: QEMUFile where to send the data
 * @pss: block/offset of the page to send
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_page(QEMUFile *f, PageSearchStatus *pss,
                         bool last_stage, uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit;
    ram_addr_t current_addr;
    uint8_t *p;
    int ret;
    bool send_async = true;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    /* In doubt sent page as normal */
    bytes_xmit = 0;
    ret = ram_control_save_page(f, block->offset,
                           offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }

    XBZRLE_cache_lock();

    current_addr = block->offset + offset;

    if (block == last_sent_block) {
        offset |= RAM_SAVE_FLAG_CONTINUE;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        /* Control path (e.g. RDMA) handled it; just account the page. */
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        pages = save_zero_page(f, block, offset, p, bytes_transferred);
        if (pages > 0) {
            /* Must let xbzrle know, otherwise a previous (now 0'd) cached
             * page would be stale
             */
            xbzrle_cache_zero_page(current_addr);
        } else if (!ram_bulk_stage &&
                   !migration_in_postcopy(migrate_get_current()) &&
                   migrate_use_xbzrle()) {
            pages = save_xbzrle_page(f, &p, current_addr, block,
                                     offset, last_stage, bytes_transferred);
            if (!last_stage) {
                /* Can't send this cached data async, since the cache page
                 * might get updated before it gets to the wire
                 */
                send_async = false;
            }
        }
    }

    /* XBZRLE overflow or normal page */
    if (pages == -1) {
        *bytes_transferred += save_page_header(f, block,
                                               offset | RAM_SAVE_FLAG_PAGE);
        if (send_async) {
            qemu_put_buffer_async(f, p, TARGET_PAGE_SIZE);
        } else {
            qemu_put_buffer(f, p, TARGET_PAGE_SIZE);
        }
        *bytes_transferred += TARGET_PAGE_SIZE;
        pages = 1;
        acct_info.norm_pages++;
    }

    XBZRLE_cache_unlock();

    return pages;
}
807
/*
 * Compress one page into @f (a worker's in-memory QEMUFile): write the
 * page header, then the zlib-compressed page data.  Returns the number
 * of bytes produced, or 0 on compression failure (in which case the
 * error is latched on the migration stream's file).
 */
static int do_compress_ram_page(QEMUFile *f, RAMBlock *block,
                                ram_addr_t offset)
{
    int bytes_sent, blen;
    uint8_t *p = block->host + (offset & TARGET_PAGE_MASK);

    bytes_sent = save_page_header(f, block, offset |
                                  RAM_SAVE_FLAG_COMPRESS_PAGE);
    blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                     migrate_compress_level());
    if (blen < 0) {
        bytes_sent = 0;
        /* Latch the error on the real migration stream, not @f. */
        qemu_file_set_error(migrate_get_current()->to_dst_file, blen);
        error_report("compressed data failed!");
    } else {
        bytes_sent += blen;
    }

    return bytes_sent;
}
828
/* Total bytes sent on the migration stream for RAM. */
static uint64_t bytes_transferred;

/*
 * Wait for every compression worker to go idle, then drain each
 * worker's buffered output into the migration stream @f.  Must be
 * called before anything that assumes all queued pages are on the wire
 * (e.g. switching blocks on the compressed path).
 */
static void flush_compressed_data(QEMUFile *f)
{
    int idx, len, thread_count;

    if (!migrate_use_compression()) {
        return;
    }
    thread_count = migrate_compress_threads();

    /* Phase 1: wait until every worker reports done. */
    qemu_mutex_lock(&comp_done_lock);
    for (idx = 0; idx < thread_count; idx++) {
        while (!comp_param[idx].done) {
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    /* Phase 2: copy each worker's buffered output to the stream. */
    for (idx = 0; idx < thread_count; idx++) {
        qemu_mutex_lock(&comp_param[idx].mutex);
        if (!comp_param[idx].quit) {
            len = qemu_put_qemu_file(f, comp_param[idx].file);
            bytes_transferred += len;
        }
        qemu_mutex_unlock(&comp_param[idx].mutex);
    }
}

/* Hand a (block, offset) work item to a compression worker; caller
 * holds param->mutex and signals param->cond afterwards. */
static inline void set_compress_params(CompressParam *param, RAMBlock *block,
                                       ram_addr_t offset)
{
    param->block = block;
    param->offset = offset;
}
864
/*
 * Dispatch one page to an idle compression worker, first draining that
 * worker's previous output into @f.  Blocks on comp_done_cond until a
 * worker becomes free.  Returns the number of pages queued (1).
 */
static int compress_page_with_multi_thread(QEMUFile *f, RAMBlock *block,
                                           ram_addr_t offset,
                                           uint64_t *bytes_transferred)
{
    int idx, thread_count, bytes_xmit = -1, pages = -1;

    thread_count = migrate_compress_threads();
    qemu_mutex_lock(&comp_done_lock);
    while (true) {
        for (idx = 0; idx < thread_count; idx++) {
            if (comp_param[idx].done) {
                comp_param[idx].done = false;
                /* Collect the worker's previous compressed output first. */
                bytes_xmit = qemu_put_qemu_file(f, comp_param[idx].file);
                qemu_mutex_lock(&comp_param[idx].mutex);
                set_compress_params(&comp_param[idx], block, offset);
                qemu_cond_signal(&comp_param[idx].cond);
                qemu_mutex_unlock(&comp_param[idx].mutex);
                pages = 1;
                acct_info.norm_pages++;
                *bytes_transferred += bytes_xmit;
                break;
            }
        }
        if (pages > 0) {
            break;
        } else {
            /* All workers busy: wait for one to finish. */
            qemu_cond_wait(&comp_done_cond, &comp_done_lock);
        }
    }
    qemu_mutex_unlock(&comp_done_lock);

    return pages;
}
898
899
900
901
902
903
904
905
906
907
908
909
/**
 * ram_save_compressed_page: send one page through the multi-threaded
 * compression path (or the control hook / zero-page fast paths).
 *
 * Returns: Number of pages written (>=0), or a negative error value.
 *
 * @f: QEMUFile where to send the data
 * @pss: block/offset of the page to send
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_save_compressed_page(QEMUFile *f, PageSearchStatus *pss,
                                    bool last_stage,
                                    uint64_t *bytes_transferred)
{
    int pages = -1;
    uint64_t bytes_xmit = 0;
    uint8_t *p;
    int ret, blen;
    RAMBlock *block = pss->block;
    ram_addr_t offset = pss->offset;

    p = block->host + offset;

    ret = ram_control_save_page(f, block->offset,
                                offset, TARGET_PAGE_SIZE, &bytes_xmit);
    if (bytes_xmit) {
        *bytes_transferred += bytes_xmit;
        pages = 1;
    }
    if (ret != RAM_SAVE_CONTROL_NOT_SUPP) {
        /* Control path (e.g. RDMA) handled it; just account the page. */
        if (ret != RAM_SAVE_CONTROL_DELAYED) {
            if (bytes_xmit > 0) {
                acct_info.norm_pages++;
            } else if (bytes_xmit == 0) {
                acct_info.dup_pages++;
            }
        }
    } else {
        /* When starting the process of a new block, the first page of
         * the block should be sent out before other pages in the same
         * block, and all the pages in last block should have been sent
         * out, keeping this order is important, because the 'cont' flag
         * is used to avoid resending the block name.
         */
        if (block != last_sent_block) {
            /* Drain workers so the previous block's pages precede ours. */
            flush_compressed_data(f);
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                /* Make sure the first page is sent out before other pages.
                 * Compressed inline so it carries the block name header. */
                bytes_xmit = save_page_header(f, block, offset |
                                              RAM_SAVE_FLAG_COMPRESS_PAGE);
                blen = qemu_put_compression_data(f, p, TARGET_PAGE_SIZE,
                                                 migrate_compress_level());
                if (blen > 0) {
                    *bytes_transferred += bytes_xmit + blen;
                    acct_info.norm_pages++;
                    pages = 1;
                } else {
                    qemu_file_set_error(f, blen);
                    error_report("compressed data failed!");
                }
            }
        } else {
            offset |= RAM_SAVE_FLAG_CONTINUE;
            pages = save_zero_page(f, block, offset, p, bytes_transferred);
            if (pages == -1) {
                pages = compress_page_with_multi_thread(f, block, offset,
                                                        bytes_transferred);
            }
        }
    }

    return pages;
}
974
975
976
977
978
979
980
981
982
983
984
985
986
/**
 * find_dirty_block: advance @pss to the next dirty page, wrapping from
 * block to block and around the end of RAM.
 *
 * Returns: true if a dirty page was found at the updated @pss position.
 * *@again is cleared only when a complete round has finished with
 * nothing left to send; otherwise the caller should keep iterating.
 * *@ram_addr_abs receives the absolute address of the found page.
 */
static bool find_dirty_block(QEMUFile *f, PageSearchStatus *pss,
                             bool *again, ram_addr_t *ram_addr_abs)
{
    pss->offset = migration_bitmap_find_dirty(pss->block, pss->offset,
                                              ram_addr_abs);
    if (pss->complete_round && pss->block == last_seen_block &&
        pss->offset >= last_offset) {
        /*
         * We've been once around the RAM and haven't found anything.
         * Give up.
         */
        *again = false;
        return false;
    }
    if (pss->offset >= pss->block->used_length) {
        /* Didn't find anything in this RAM Block */
        pss->offset = 0;
        pss->block = QLIST_NEXT_RCU(pss->block, next);
        if (!pss->block) {
            /* Hit the end of the list */
            pss->block = QLIST_FIRST_RCU(&ram_list.blocks);
            /* Flag that we've looped */
            pss->complete_round = true;
            ram_bulk_stage = false;
            if (migrate_use_xbzrle()) {
                /* If xbzrle is on, stop using the data compression at this
                 * point. In theory, xbzrle can do better than compression.
                 */
                flush_compressed_data(f);
                compression_switch = false;
            }
        }
        /* Didn't find anything this time, but try again on the new block */
        *again = true;
        return false;
    } else {
        /* Can go around again, but... */
        *again = true;
        /* We've found something so probably don't need to */
        return true;
    }
}
1029
1030
1031
1032
1033
1034
1035
1036
1037
/**
 * unqueue_page: pop (part of) the next postcopy page request.
 *
 * Returns the RAMBlock of the request, or NULL if the queue is empty.
 * *@offset receives the offset within the block; *@ram_addr_abs the
 * page-aligned absolute address.  A request longer than one page is
 * trimmed by a page and left at the head for the next call; a one-page
 * request is removed and freed (dropping its memory-region reference).
 */
static RAMBlock *unqueue_page(MigrationState *ms, ram_addr_t *offset,
                              ram_addr_t *ram_addr_abs)
{
    RAMBlock *block = NULL;

    qemu_mutex_lock(&ms->src_page_req_mutex);
    if (!QSIMPLEQ_EMPTY(&ms->src_page_requests)) {
        struct MigrationSrcPageRequest *entry =
                                QSIMPLEQ_FIRST(&ms->src_page_requests);
        block = entry->rb;
        *offset = entry->offset;
        *ram_addr_abs = (entry->offset + entry->rb->offset) &
                        TARGET_PAGE_MASK;

        if (entry->len > TARGET_PAGE_SIZE) {
            /* Consume one page of the request, keep the remainder queued. */
            entry->len -= TARGET_PAGE_SIZE;
            entry->offset += TARGET_PAGE_SIZE;
        } else {
            memory_region_unref(block->mr);
            QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
            g_free(entry);
        }
    }
    qemu_mutex_unlock(&ms->src_page_req_mutex);

    return block;
}
1065
1066
1067
1068
1069
1070
1071
1072
1073
1074
1075
1076static bool get_queued_page(MigrationState *ms, PageSearchStatus *pss,
1077 ram_addr_t *ram_addr_abs)
1078{
1079 RAMBlock *block;
1080 ram_addr_t offset;
1081 bool dirty;
1082
1083 do {
1084 block = unqueue_page(ms, &offset, ram_addr_abs);
1085
1086
1087
1088
1089
1090
1091 if (block) {
1092 unsigned long *bitmap;
1093 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1094 dirty = test_bit(*ram_addr_abs >> TARGET_PAGE_BITS, bitmap);
1095 if (!dirty) {
1096 trace_get_queued_page_not_dirty(
1097 block->idstr, (uint64_t)offset,
1098 (uint64_t)*ram_addr_abs,
1099 test_bit(*ram_addr_abs >> TARGET_PAGE_BITS,
1100 atomic_rcu_read(&migration_bitmap_rcu)->unsentmap));
1101 } else {
1102 trace_get_queued_page(block->idstr,
1103 (uint64_t)offset,
1104 (uint64_t)*ram_addr_abs);
1105 }
1106 }
1107
1108 } while (block && !dirty);
1109
1110 if (block) {
1111
1112
1113
1114
1115
1116
1117 ram_bulk_stage = false;
1118
1119
1120
1121
1122
1123
1124 pss->block = block;
1125 pss->offset = offset;
1126 }
1127
1128 return !!block;
1129}
1130
1131
1132
1133
1134
1135
1136
1137
/**
 * flush_page_queue: discard any remaining postcopy page requests,
 * dropping the memory-region reference each one holds.  Called at the
 * end of migration; the queue is assumed to have no concurrent
 * producers at this point.
 */
void flush_page_queue(MigrationState *ms)
{
    struct MigrationSrcPageRequest *mspr, *next_mspr;
    /* This queue generally should be empty - but in the case of a failed
     * migration might have some droppings in.
     */
    rcu_read_lock();
    QSIMPLEQ_FOREACH_SAFE(mspr, &ms->src_page_requests, next_req, next_mspr) {
        memory_region_unref(mspr->rb->mr);
        QSIMPLEQ_REMOVE_HEAD(&ms->src_page_requests, next_req);
        g_free(mspr);
    }
    rcu_read_unlock();
}
1152
1153
1154
1155
1156
1157
1158
1159
1160
/**
 * ram_save_queue_pages: queue a postcopy request for [start, start+len)
 * of RAMBlock @rbname (NULL means "same block as the previous request").
 *
 * Returns 0 on success, -1 on error (unknown block, no previous block,
 * or range past the block's used length).  Takes a memory-region
 * reference that is dropped when the request is consumed or flushed.
 */
int ram_save_queue_pages(MigrationState *ms, const char *rbname,
                         ram_addr_t start, ram_addr_t len)
{
    RAMBlock *ramblock;

    ms->postcopy_requests++;
    rcu_read_lock();
    if (!rbname) {
        /* Reuse last RAMBlock */
        ramblock = ms->last_req_rb;

        if (!ramblock) {
            /*
             * Shouldn't happen, we can't reuse the last RAMBlock if
             * it's the 1st request.
             */
            error_report("ram_save_queue_pages no previous block");
            goto err;
        }
    } else {
        ramblock = qemu_ram_block_by_name(rbname);

        if (!ramblock) {
            /* We shouldn't be asked for a non-existent RAMBlock */
            error_report("ram_save_queue_pages no block '%s'", rbname);
            goto err;
        }
        ms->last_req_rb = ramblock;
    }
    trace_ram_save_queue_pages(ramblock->idstr, start, len);
    if (start+len > ramblock->used_length) {
        error_report("%s request overrun start=" RAM_ADDR_FMT " len="
                     RAM_ADDR_FMT " blocklen=" RAM_ADDR_FMT,
                     __func__, start, len, ramblock->used_length);
        goto err;
    }

    struct MigrationSrcPageRequest *new_entry =
        g_malloc0(sizeof(struct MigrationSrcPageRequest));
    new_entry->rb = ramblock;
    new_entry->offset = start;
    new_entry->len = len;

    /* Keep the block alive until the request is consumed. */
    memory_region_ref(ramblock->mr);
    qemu_mutex_lock(&ms->src_page_req_mutex);
    QSIMPLEQ_INSERT_TAIL(&ms->src_page_requests, new_entry, next_req);
    qemu_mutex_unlock(&ms->src_page_req_mutex);
    rcu_read_unlock();

    return 0;

err:
    rcu_read_unlock();
    return -1;
}
1216
1217
1218
1219
1220
1221
1222
1223
1224
1225
1226
1227
1228
1229
/**
 * ram_save_target_page: save one target-size page, if it is (still)
 * dirty.  Clears the page's dirty and unsent bits and, if a page was
 * actually put on the wire, records its block as last_sent_block so the
 * CONTINUE flag optimisation stays valid.
 *
 * Returns: number of pages written (may be 0 if the page was clean),
 *          or a negative error value.
 *
 * @ms/@f: migration state and output stream
 * @pss: page to send; @last_stage: completion pass
 * @bytes_transferred: incremented by bytes written
 * @dirty_ram_abs: absolute address of the page (for bitmap access)
 */
static int ram_save_target_page(MigrationState *ms, QEMUFile *f,
                                PageSearchStatus *pss,
                                bool last_stage,
                                uint64_t *bytes_transferred,
                                ram_addr_t dirty_ram_abs)
{
    int res = 0;

    /* Check the pages is dirty and if it is send it */
    if (migration_bitmap_clear_dirty(dirty_ram_abs)) {
        unsigned long *unsentmap;
        if (compression_switch && migrate_use_compression()) {
            res = ram_save_compressed_page(f, pss,
                                           last_stage,
                                           bytes_transferred);
        } else {
            res = ram_save_page(f, pss, last_stage,
                                bytes_transferred);
        }

        if (res < 0) {
            return res;
        }
        unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
        if (unsentmap) {
            clear_bit(dirty_ram_abs >> TARGET_PAGE_BITS, unsentmap);
        }
        /* Only update last_sent_block if the page was actually sent
         * (i.e. the control-path case where res == 0 doesn't count).
         */
        if (res > 0) {
            last_sent_block = pss->block;
        }
    }

    return res;
}
1268
1269
1270
1271
1272
1273
1274
1275
1276
1277
1278
1279
1280
1281
1282
1283
1284
1285
/**
 * ram_save_host_page: save target pages until a host-page boundary.
 * When the host page size exceeds the target page size, postcopy needs
 * whole host pages sent together, so this loops ram_save_target_page()
 * until pss->offset is host-page aligned again.
 *
 * Returns: total pages written, or a negative error value.
 *
 * On exit pss->offset points at the LAST target page sent, because the
 * caller's search loop will itself advance past it.
 */
static int ram_save_host_page(MigrationState *ms, QEMUFile *f,
                              PageSearchStatus *pss,
                              bool last_stage,
                              uint64_t *bytes_transferred,
                              ram_addr_t dirty_ram_abs)
{
    int tmppages, pages = 0;
    do {
        tmppages = ram_save_target_page(ms, f, pss, last_stage,
                                        bytes_transferred, dirty_ram_abs);
        if (tmppages < 0) {
            return tmppages;
        }

        pages += tmppages;
        pss->offset += TARGET_PAGE_SIZE;
        dirty_ram_abs += TARGET_PAGE_SIZE;
    } while (pss->offset & (qemu_host_page_size - 1));

    /* The offset we leave with is the last one we looked at */
    pss->offset -= TARGET_PAGE_SIZE;
    return pages;
}
1309
1310
1311
1312
1313
1314
1315
1316
1317
1318
1319
1320
1321
1322
1323
1324
1325
/**
 * ram_find_and_save_block: find one dirty page (preferring queued
 * postcopy requests over the linear background search) and send it.
 * Called within an RCU critical section by the caller.
 *
 * Returns: number of pages written (0 if nothing dirty remains), or a
 *          negative error value.
 *
 * @f: QEMUFile where to send the data
 * @last_stage: if we are at the completion stage
 * @bytes_transferred: increase it with the number of transferred bytes
 */
static int ram_find_and_save_block(QEMUFile *f, bool last_stage,
                                   uint64_t *bytes_transferred)
{
    PageSearchStatus pss;
    MigrationState *ms = migrate_get_current();
    int pages = 0;
    bool again, found;
    /* absolute address of the page, for the bitmap */
    ram_addr_t dirty_ram_abs;

    /* Resume the search where the previous call left off. */
    pss.block = last_seen_block;
    pss.offset = last_offset;
    pss.complete_round = false;

    if (!pss.block) {
        pss.block = QLIST_FIRST_RCU(&ram_list.blocks);
    }

    do {
        again = true;
        found = get_queued_page(ms, &pss, &dirty_ram_abs);

        if (!found) {
            /* priority queue empty, so just search for something dirty */
            found = find_dirty_block(f, &pss, &again, &dirty_ram_abs);
        }

        if (found) {
            pages = ram_save_host_page(ms, f, &pss,
                                       last_stage, bytes_transferred,
                                       dirty_ram_abs);
        }
    } while (!pages && again);

    last_seen_block = pss.block;
    last_offset = pss.offset;

    return pages;
}
1365
/*
 * Account pages transferred outside the normal save path (e.g. by
 * RDMA): bump the zero- or normal-page counters and, for real data,
 * the byte total and the file position.
 */
void acct_update_position(QEMUFile *f, size_t size, bool zero)
{
    uint64_t pages = size / TARGET_PAGE_SIZE;
    if (zero) {
        acct_info.dup_pages += pages;
    } else {
        acct_info.norm_pages += pages;
        bytes_transferred += size;
        qemu_update_position(f, size);
    }
}

/* Number of dirty pages still to be sent. */
static ram_addr_t ram_save_remaining(void)
{
    return migration_dirty_pages;
}

/* Remaining bytes, assuming one target page per dirty page. */
uint64_t ram_bytes_remaining(void)
{
    return ram_save_remaining() * TARGET_PAGE_SIZE;
}

/* Total RAM bytes put on the wire so far. */
uint64_t ram_bytes_transferred(void)
{
    return bytes_transferred;
}

/* Sum of the used length of every RAMBlock. */
uint64_t ram_bytes_total(void)
{
    RAMBlock *block;
    uint64_t total = 0;

    rcu_read_lock();
    QLIST_FOREACH_RCU(block, &ram_list.blocks, next)
        total += block->used_length;
    rcu_read_unlock();
    return total;
}
1404
/* Free the destination-side XBZRLE decode buffer (idempotent). */
void free_xbzrle_decoded_buf(void)
{
    g_free(xbzrle_decoded_buf);
    xbzrle_decoded_buf = NULL;
}

/* RCU reclaim callback: free a retired bitmap pair and its container. */
static void migration_bitmap_free(struct BitmapRcu *bmap)
{
    g_free(bmap->bmap);
    g_free(bmap->unsentmap);
    g_free(bmap);
}
1417
/*
 * Tear down RAM migration state: unpublish and RCU-free the migration
 * bitmap, stop dirty logging, and release the XBZRLE cache and buffers
 * under the cache lock.
 */
static void ram_migration_cleanup(void *opaque)
{
    /* caller have hold iothread lock or is in a bh, so there is
     * no writing race against this migration_bitmap
     */
    struct BitmapRcu *bitmap = migration_bitmap_rcu;
    atomic_rcu_set(&migration_bitmap_rcu, NULL);
    if (bitmap) {
        memory_global_dirty_log_stop();
        /* Defer the free until existing RCU readers are done. */
        call_rcu(bitmap, migration_bitmap_free, rcu);
    }

    XBZRLE_cache_lock();
    if (XBZRLE.cache) {
        cache_fini(XBZRLE.cache);
        g_free(XBZRLE.encoded_buf);
        g_free(XBZRLE.current_buf);
        g_free(ZERO_TARGET_PAGE);
        XBZRLE.cache = NULL;
        XBZRLE.encoded_buf = NULL;
        XBZRLE.current_buf = NULL;
    }
    XBZRLE_cache_unlock();
}
1442
1443static void reset_ram_globals(void)
1444{
1445 last_seen_block = NULL;
1446 last_sent_block = NULL;
1447 last_offset = 0;
1448 last_version = ram_list.version;
1449 ram_bulk_stage = true;
1450}
1451
#define MAX_WAIT 50 /* ms, half buffered_file limit */

/*
 * Grow the migration bitmap after a RAM hotplug from @old to @new pages.
 * A fresh, larger bitmap is built with the old bits copied and the new
 * range marked dirty, then swapped in under migration_bitmap_mutex; the
 * old bitmap is reclaimed via RCU.
 */
void migration_bitmap_extend(ram_addr_t old, ram_addr_t new)
{
    /* called in qemu main thread, so there is
     * no writing race against this migration_bitmap
     */
    if (migration_bitmap_rcu) {
        struct BitmapRcu *old_bitmap = migration_bitmap_rcu, *bitmap;
        bitmap = g_new(struct BitmapRcu, 1);
        bitmap->bmap = bitmap_new(new);

        /* prevent migration_bitmap content from being set bit
         * by migration_bitmap_sync_range() at the same time.
         * it is safe to migration if migration_bitmap is cleared bit
         * at the same time.
         */
        qemu_mutex_lock(&migration_bitmap_mutex);
        bitmap_copy(bitmap->bmap, old_bitmap->bmap, old);
        /* Newly hot-plugged pages must all be sent: mark them dirty. */
        bitmap_set(bitmap->bmap, old, new - old);

        /* We don't have a way to safely extend the sentmap
         * with RCU; so mark it as missing, entry to postcopy
         * will fail.
         */
        bitmap->unsentmap = NULL;

        atomic_rcu_set(&migration_bitmap_rcu, bitmap);
        qemu_mutex_unlock(&migration_bitmap_mutex);
        migration_dirty_pages += new - old;
        call_rcu(old_bitmap, migration_bitmap_free, rcu);
    }
}
1485
1486
1487
1488
1489
1490
/*
 * Debug helper: dump @todump (or the live migration bitmap if NULL) to
 * stderr, 128 pages per line, printing only lines that contain at least
 * one bit differing from @expected.
 */
void ram_debug_dump_bitmap(unsigned long *todump, bool expected)
{
    int64_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;

    int64_t cur;
    int64_t linelen = 128;
    char linebuf[129];

    if (!todump) {
        todump = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    }

    for (cur = 0; cur < ram_pages; cur += linelen) {
        int64_t curb;
        bool found = false;
        /*
         * Last line; catch the case where the line length
         * is longer than remaining ram
         */
        if (cur + linelen > ram_pages) {
            linelen = ram_pages - cur;
        }
        for (curb = 0; curb < linelen; curb++) {
            bool thisbit = test_bit(cur + curb, todump);
            linebuf[curb] = thisbit ? '1' : '.';
            found = found || (thisbit != expected);
        }
        if (found) {
            linebuf[curb] = '\0';
            fprintf(stderr,  "0x%08" PRIx64 " : %s\n", cur, linebuf);
        }
    }
}
1524
1525
1526
1527
1528
1529
1530
1531
1532
1533
1534static int postcopy_send_discard_bm_ram(MigrationState *ms,
1535 PostcopyDiscardState *pds,
1536 unsigned long start,
1537 unsigned long length)
1538{
1539 unsigned long end = start + length;
1540 unsigned long current;
1541 unsigned long *unsentmap;
1542
1543 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1544 for (current = start; current < end; ) {
1545 unsigned long one = find_next_bit(unsentmap, end, current);
1546
1547 if (one <= end) {
1548 unsigned long zero = find_next_zero_bit(unsentmap, end, one + 1);
1549 unsigned long discard_length;
1550
1551 if (zero >= end) {
1552 discard_length = end - one;
1553 } else {
1554 discard_length = zero - one;
1555 }
1556 if (discard_length) {
1557 postcopy_discard_send_range(ms, pds, one, discard_length);
1558 }
1559 current = one + discard_length;
1560 } else {
1561 current = one;
1562 }
1563 }
1564
1565 return 0;
1566}
1567
1568
1569
1570
1571
1572
1573
1574
1575
1576static int postcopy_each_ram_send_discard(MigrationState *ms)
1577{
1578 struct RAMBlock *block;
1579 int ret;
1580
1581 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1582 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1583 PostcopyDiscardState *pds = postcopy_discard_send_init(ms,
1584 first,
1585 block->idstr);
1586
1587
1588
1589
1590
1591
1592 ret = postcopy_send_discard_bm_ram(ms, pds, first,
1593 block->used_length >> TARGET_PAGE_BITS);
1594 postcopy_discard_send_finish(ms, pds);
1595 if (ret) {
1596 return ret;
1597 }
1598 }
1599
1600 return 0;
1601}
1602
1603
1604
1605
1606
1607
1608
1609
1610
1611
1612
1613
1614
/*
 * One fixup pass over a block for the case where host pages are larger
 * than target pages: round every run of pages out to whole host pages,
 * discarding and re-dirtying the partial host pages at run edges so the
 * source resends them as complete host pages.
 *
 * Called twice by postcopy_chunk_hostpages():
 *   unsent_pass == true  : walk runs of zero bits in the unsentmap
 *                          (i.e. pages already sent)
 *   unsent_pass == false : walk runs of set bits in the dirty bitmap
 *
 * @ms: current migration state
 * @unsent_pass: selects which bitmap drives the walk (see above)
 * @block: block being processed
 * @pds: per-block discard state for postcopy_discard_send_range()
 */
static void postcopy_chunk_hostpages_pass(MigrationState *ms, bool unsent_pass,
                                          RAMBlock *block,
                                          PostcopyDiscardState *pds)
{
    unsigned long *bitmap;
    unsigned long *unsentmap;
    /* Number of target pages per host page (>1 or this pass is a no-op) */
    unsigned int host_ratio = qemu_host_page_size / TARGET_PAGE_SIZE;
    unsigned long first = block->offset >> TARGET_PAGE_BITS;
    unsigned long len = block->used_length >> TARGET_PAGE_BITS;
    unsigned long last = first + (len - 1);
    unsigned long run_start;

    bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
    unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;

    if (unsent_pass) {
        /* Start of a run of already-sent pages (zero bits in unsentmap) */
        run_start = find_next_zero_bit(unsentmap, last + 1, first);
    } else {
        /* Start of a run of dirty pages */
        run_start = find_next_bit(bitmap, last + 1, first);
    }

    while (run_start <= last) {
        bool do_fixup = false;
        unsigned long fixup_start_addr;
        unsigned long host_offset;

        /*
         * If the run doesn't start on a host-page boundary, pull it back
         * to the boundary and fix up that whole host page.
         */
        host_offset = run_start % host_ratio;
        if (host_offset) {
            do_fixup = true;
            run_start -= host_offset;
            fixup_start_addr = run_start;
            /* Resume scanning after the host page we're fixing */
            run_start = run_start + host_ratio;
        } else {
            /* The run starts aligned; find where it ends */
            unsigned long run_end;
            if (unsent_pass) {
                run_end = find_next_bit(unsentmap, last + 1, run_start + 1);
            } else {
                run_end = find_next_zero_bit(bitmap, last + 1, run_start + 1);
            }
            /*
             * If the run ends mid host page, fix up the host page
             * containing the end of the run.
             */
            host_offset = run_end % host_ratio;
            if (host_offset) {
                do_fixup = true;
                fixup_start_addr = run_end - host_offset;
                /*
                 * Continue scanning after the host page being fixed up;
                 * anything in between is part of the clean run.
                 */
                run_start = fixup_start_addr + host_ratio;
            } else {
                /*
                 * Run ends exactly on a host-page boundary: nothing to
                 * fix, continue after it.
                 */
                run_start = run_end + 1;
            }
        }

        if (do_fixup) {
            unsigned long page;

            /*
             * Tell the destination to discard this host page unless it
             * is already wholly unsent (then there's nothing to undo).
             */
            if (unsent_pass || !test_bit(fixup_start_addr, unsentmap)) {
                /*
                 * fixup_start_addr is host-page aligned here, so this
                 * discards exactly one host page.
                 */
                postcopy_discard_send_range(ms, pds, fixup_start_addr,
                                            host_ratio);
            }

            /* Mark every target page of the host page unsent and dirty */
            for (page = fixup_start_addr;
                 page < fixup_start_addr + host_ratio; page++) {
                /* Unsent: it must go (again) in the postcopy stream */
                set_bit(page, unsentmap);

                /*
                 * Dirty: bump the dirty-page count only for pages that
                 * weren't already dirty (test_and_set returns old bit).
                 */
                migration_dirty_pages += !test_and_set_bit(page, bitmap);
            }
        }

        if (unsent_pass) {
            /* Find the start of the next sent run */
            run_start = find_next_zero_bit(unsentmap, last + 1,
                                           run_start);
        } else {
            /* Find the start of the next dirty run */
            run_start = find_next_bit(bitmap, last + 1, run_start);
        }
    }
}
1725
1726
1727
1728
1729
1730
1731
1732
1733
1734static int postcopy_chunk_hostpages(MigrationState *ms)
1735{
1736 struct RAMBlock *block;
1737
1738 if (qemu_host_page_size == TARGET_PAGE_SIZE) {
1739
1740 return 0;
1741 }
1742
1743
1744 last_seen_block = NULL;
1745 last_sent_block = NULL;
1746 last_offset = 0;
1747
1748 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1749 unsigned long first = block->offset >> TARGET_PAGE_BITS;
1750
1751 PostcopyDiscardState *pds =
1752 postcopy_discard_send_init(ms, first, block->idstr);
1753
1754
1755 postcopy_chunk_hostpages_pass(ms, true, block, pds);
1756
1757
1758
1759
1760 postcopy_chunk_hostpages_pass(ms, false, block, pds);
1761
1762 postcopy_discard_send_finish(ms, pds);
1763 }
1764
1765 return 0;
1766}
1767
1768
1769
1770
1771
1772
1773
1774
1775
1776
1777int ram_postcopy_send_discard_bitmap(MigrationState *ms)
1778{
1779 int ret;
1780 unsigned long *bitmap, *unsentmap;
1781
1782 rcu_read_lock();
1783
1784
1785 migration_bitmap_sync();
1786
1787 unsentmap = atomic_rcu_read(&migration_bitmap_rcu)->unsentmap;
1788 if (!unsentmap) {
1789
1790
1791
1792
1793 error_report("migration ram resized during precopy phase");
1794 rcu_read_unlock();
1795 return -EINVAL;
1796 }
1797
1798
1799 ret = postcopy_chunk_hostpages(ms);
1800 if (ret) {
1801 rcu_read_unlock();
1802 return ret;
1803 }
1804
1805
1806
1807
1808 bitmap = atomic_rcu_read(&migration_bitmap_rcu)->bmap;
1809 bitmap_or(unsentmap, unsentmap, bitmap,
1810 last_ram_offset() >> TARGET_PAGE_BITS);
1811
1812
1813 trace_ram_postcopy_send_discard_bitmap();
1814#ifdef DEBUG_POSTCOPY
1815 ram_debug_dump_bitmap(unsentmap, true);
1816#endif
1817
1818 ret = postcopy_each_ram_send_discard(ms);
1819 rcu_read_unlock();
1820
1821 return ret;
1822}
1823
1824
1825
1826
1827
1828
1829
1830
1831
1832int ram_discard_range(MigrationIncomingState *mis,
1833 const char *block_name,
1834 uint64_t start, size_t length)
1835{
1836 int ret = -1;
1837
1838 rcu_read_lock();
1839 RAMBlock *rb = qemu_ram_block_by_name(block_name);
1840
1841 if (!rb) {
1842 error_report("ram_discard_range: Failed to find block '%s'",
1843 block_name);
1844 goto err;
1845 }
1846
1847 uint8_t *host_startaddr = rb->host + start;
1848
1849 if ((uintptr_t)host_startaddr & (qemu_host_page_size - 1)) {
1850 error_report("ram_discard_range: Unaligned start address: %p",
1851 host_startaddr);
1852 goto err;
1853 }
1854
1855 if ((start + length) <= rb->used_length) {
1856 uint8_t *host_endaddr = host_startaddr + length;
1857 if ((uintptr_t)host_endaddr & (qemu_host_page_size - 1)) {
1858 error_report("ram_discard_range: Unaligned end address: %p",
1859 host_endaddr);
1860 goto err;
1861 }
1862 ret = postcopy_ram_discard_range(mis, host_startaddr, length);
1863 } else {
1864 error_report("ram_discard_range: Overrun block '%s' (%" PRIu64
1865 "/%zx/" RAM_ADDR_FMT")",
1866 block_name, start, length, rb->used_length);
1867 }
1868
1869err:
1870 rcu_read_unlock();
1871
1872 return ret;
1873}
1874
/*
 * Initialise all global state for a RAM migration: counters, the XBZRLE
 * cache and buffers (if enabled), and the dirty bitmap(s), then start
 * dirty logging and do an initial sync.
 *
 * Returns 0 on success, -1 on allocation failure (reported).
 */
static int ram_save_init_globals(void)
{
    int64_t ram_bitmap_pages;

    dirty_rate_high_cnt = 0;
    bitmap_sync_count = 0;
    migration_bitmap_sync_init();
    qemu_mutex_init(&migration_bitmap_mutex);

    if (migrate_use_xbzrle()) {
        /* Hold the XBZRLE lock while (re)creating the cache */
        XBZRLE_cache_lock();
        ZERO_TARGET_PAGE = g_malloc0(TARGET_PAGE_SIZE);
        XBZRLE.cache = cache_init(migrate_xbzrle_cache_size() /
                                  TARGET_PAGE_SIZE,
                                  TARGET_PAGE_SIZE);
        if (!XBZRLE.cache) {
            XBZRLE_cache_unlock();
            error_report("Error creating cache");
            return -1;
        }
        XBZRLE_cache_unlock();

        /* We prefer not to abort if there is no memory */
        XBZRLE.encoded_buf = g_try_malloc0(TARGET_PAGE_SIZE);
        if (!XBZRLE.encoded_buf) {
            error_report("Error allocating encoded_buf");
            return -1;
        }

        XBZRLE.current_buf = g_try_malloc(TARGET_PAGE_SIZE);
        if (!XBZRLE.current_buf) {
            error_report("Error allocating current_buf");
            g_free(XBZRLE.encoded_buf);
            XBZRLE.encoded_buf = NULL;
            return -1;
        }

        acct_clear();
    }

    /*
     * Take the iothread lock (BQL), then the ramlist lock, then enter an
     * RCU critical section -- note the unlock order below is ramlist,
     * iothread, RCU.
     */
    qemu_mutex_lock_iothread();

    qemu_mutex_lock_ramlist();
    rcu_read_lock();
    bytes_transferred = 0;
    reset_ram_globals();

    ram_bitmap_pages = last_ram_offset() >> TARGET_PAGE_BITS;
    migration_bitmap_rcu = g_new0(struct BitmapRcu, 1);
    /* Start with every page dirty so everything gets sent at least once */
    migration_bitmap_rcu->bmap = bitmap_new(ram_bitmap_pages);
    bitmap_set(migration_bitmap_rcu->bmap, 0, ram_bitmap_pages);

    if (migrate_postcopy_ram()) {
        /* The unsentmap mirrors the bitmap: initially all pages unsent */
        migration_bitmap_rcu->unsentmap = bitmap_new(ram_bitmap_pages);
        bitmap_set(migration_bitmap_rcu->unsentmap, 0, ram_bitmap_pages);
    }

    /* All pages are dirty at this point (the whole of RAM is pending) */
    migration_dirty_pages = ram_bytes_total() >> TARGET_PAGE_BITS;

    memory_global_dirty_log_start();
    migration_bitmap_sync();
    qemu_mutex_unlock_ramlist();
    qemu_mutex_unlock_iothread();
    rcu_read_unlock();

    return 0;
}
1947
1948
1949
1950
1951
1952
1953
1954static int ram_save_setup(QEMUFile *f, void *opaque)
1955{
1956 RAMBlock *block;
1957
1958
1959 if (!migration_in_colo_state()) {
1960 if (ram_save_init_globals() < 0) {
1961 return -1;
1962 }
1963 }
1964
1965 rcu_read_lock();
1966
1967 qemu_put_be64(f, ram_bytes_total() | RAM_SAVE_FLAG_MEM_SIZE);
1968
1969 QLIST_FOREACH_RCU(block, &ram_list.blocks, next) {
1970 qemu_put_byte(f, strlen(block->idstr));
1971 qemu_put_buffer(f, (uint8_t *)block->idstr, strlen(block->idstr));
1972 qemu_put_be64(f, block->used_length);
1973 }
1974
1975 rcu_read_unlock();
1976
1977 ram_control_before_iterate(f, RAM_CONTROL_SETUP);
1978 ram_control_after_iterate(f, RAM_CONTROL_SETUP);
1979
1980 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
1981
1982 return 0;
1983}
1984
/*
 * One iteration of the live phase: send dirty pages until the bandwidth
 * limit is hit, the MAX_WAIT time budget is exceeded, or no dirty pages
 * remain.  Returns 1 when done, 0 to be called again, or a negative
 * error from the stream.
 */
static int ram_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int i;
    int64_t t0;
    int done = 0;

    rcu_read_lock();
    /* RAM hotplug etc. changed the block list: restart the walk */
    if (ram_list.version != last_version) {
        reset_ram_globals();
    }

    /* Pair with the version read above before walking the block list */
    smp_rmb();

    ram_control_before_iterate(f, RAM_CONTROL_ROUND);

    t0 = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    i = 0;
    while ((ret = qemu_file_rate_limit(f)) == 0) {
        int pages;

        pages = ram_find_and_save_block(f, false, &bytes_transferred);

        /* No pages left to send: this round is complete */
        if (pages == 0) {
            done = 1;
            break;
        }
        acct_info.iterations++;

        /*
         * Bound the time spent in this iteration so the main loop stays
         * responsive; checking the clock every iteration would be too
         * expensive, so only look every 64 pages.
         */
        if ((i & 63) == 0) {
            uint64_t t1 = (qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - t0) / 1000000;
            if (t1 > MAX_WAIT) {
                DPRINTF("big wait: %" PRIu64 " milliseconds, %d iterations\n",
                        t1, i);
                break;
            }
        }
        i++;
    }
    flush_compressed_data(f);
    rcu_read_unlock();

    /*
     * Must occur before EOS (or any QEMUFile operation) because of
     * RDMA protocol (ram_control hooks may touch the stream).
     */
    ram_control_after_iterate(f, RAM_CONTROL_ROUND);

    qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
    /* Account for the 8-byte EOS marker just written */
    bytes_transferred += 8;

    ret = qemu_file_get_error(f);
    if (ret < 0) {
        return ret;
    }

    return done;
}
2049
2050
2051static int ram_save_complete(QEMUFile *f, void *opaque)
2052{
2053 rcu_read_lock();
2054
2055 if (!migration_in_postcopy(migrate_get_current())) {
2056 migration_bitmap_sync();
2057 }
2058
2059 ram_control_before_iterate(f, RAM_CONTROL_FINISH);
2060
2061
2062
2063
2064 while (true) {
2065 int pages;
2066
2067 pages = ram_find_and_save_block(f, !migration_in_colo_state(),
2068 &bytes_transferred);
2069
2070 if (pages == 0) {
2071 break;
2072 }
2073 }
2074
2075 flush_compressed_data(f);
2076 ram_control_after_iterate(f, RAM_CONTROL_FINISH);
2077
2078 rcu_read_unlock();
2079
2080 qemu_put_be64(f, RAM_SAVE_FLAG_EOS);
2081
2082 return 0;
2083}
2084
2085static void ram_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
2086 uint64_t *non_postcopiable_pending,
2087 uint64_t *postcopiable_pending)
2088{
2089 uint64_t remaining_size;
2090
2091 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2092
2093 if (!migration_in_postcopy(migrate_get_current()) &&
2094 remaining_size < max_size) {
2095 qemu_mutex_lock_iothread();
2096 rcu_read_lock();
2097 migration_bitmap_sync();
2098 rcu_read_unlock();
2099 qemu_mutex_unlock_iothread();
2100 remaining_size = ram_save_remaining() * TARGET_PAGE_SIZE;
2101 }
2102
2103
2104 *postcopiable_pending += remaining_size;
2105}
2106
2107static int load_xbzrle(QEMUFile *f, ram_addr_t addr, void *host)
2108{
2109 unsigned int xh_len;
2110 int xh_flags;
2111 uint8_t *loaded_data;
2112
2113 if (!xbzrle_decoded_buf) {
2114 xbzrle_decoded_buf = g_malloc(TARGET_PAGE_SIZE);
2115 }
2116 loaded_data = xbzrle_decoded_buf;
2117
2118
2119 xh_flags = qemu_get_byte(f);
2120 xh_len = qemu_get_be16(f);
2121
2122 if (xh_flags != ENCODING_FLAG_XBZRLE) {
2123 error_report("Failed to load XBZRLE page - wrong compression!");
2124 return -1;
2125 }
2126
2127 if (xh_len > TARGET_PAGE_SIZE) {
2128 error_report("Failed to load XBZRLE page - len overflow!");
2129 return -1;
2130 }
2131
2132 qemu_get_buffer_in_place(f, &loaded_data, xh_len);
2133
2134
2135 if (xbzrle_decode_buffer(loaded_data, xh_len, host,
2136 TARGET_PAGE_SIZE) == -1) {
2137 error_report("Failed to load XBZRLE page - decode error!");
2138 return -1;
2139 }
2140
2141 return 0;
2142}
2143
2144
2145
2146
2147
2148
2149
2150
2151
2152
2153static inline RAMBlock *ram_block_from_stream(QEMUFile *f,
2154 int flags)
2155{
2156 static RAMBlock *block = NULL;
2157 char id[256];
2158 uint8_t len;
2159
2160 if (flags & RAM_SAVE_FLAG_CONTINUE) {
2161 if (!block) {
2162 error_report("Ack, bad migration stream!");
2163 return NULL;
2164 }
2165 return block;
2166 }
2167
2168 len = qemu_get_byte(f);
2169 qemu_get_buffer(f, (uint8_t *)id, len);
2170 id[len] = 0;
2171
2172 block = qemu_ram_block_by_name(id);
2173 if (!block) {
2174 error_report("Can't find block %s", id);
2175 return NULL;
2176 }
2177
2178 return block;
2179}
2180
2181static inline void *host_from_ram_block_offset(RAMBlock *block,
2182 ram_addr_t offset)
2183{
2184 if (!offset_in_ramblock(block, offset)) {
2185 return NULL;
2186 }
2187
2188 return block->host + offset;
2189}
2190
2191
2192
2193
2194
/*
 * Fill @size bytes at @host with the byte @ch.  Skips the memset when
 * the fill byte is zero and the destination already reads as all
 * zeroes, avoiding needless page dirtying/allocation.
 */
void ram_handle_compressed(void *host, uint8_t ch, uint64_t size)
{
    bool already_filled = (ch == 0) && is_zero_range(host, size);

    if (!already_filled) {
        memset(host, ch, size);
    }
}
2201
/*
 * Decompression worker thread.  Waits on its DecompressParam for work
 * (param->des set by decompress_data_with_multi_threads()), inflates
 * the page, then marks itself done under decomp_done_lock so the load
 * path can hand it the next page.  Exits when param->quit is set.
 */
static void *do_data_decompress(void *opaque)
{
    DecompressParam *param = opaque;
    unsigned long pagesize;
    uint8_t *des;
    int len;

    qemu_mutex_lock(&param->mutex);
    while (!param->quit) {
        if (param->des) {
            /* Snapshot the job and release the lock while inflating */
            des = param->des;
            len = param->len;
            param->des = 0;
            qemu_mutex_unlock(&param->mutex);

            pagesize = TARGET_PAGE_SIZE;
            /*
             * NOTE(review): the return value of uncompress() is
             * ignored here, so a corrupt compressed page is silently
             * left (partially) undecoded -- consider checking for
             * Z_OK and propagating an error.
             */
            uncompress((Bytef *)des, &pagesize,
                       (const Bytef *)param->compbuf, len);

            /* Publish completion under the shared done lock */
            qemu_mutex_lock(&decomp_done_lock);
            param->done = true;
            qemu_cond_signal(&decomp_done_cond);
            qemu_mutex_unlock(&decomp_done_lock);

            qemu_mutex_lock(&param->mutex);
        } else {
            /* No work queued: sleep until signalled */
            qemu_cond_wait(&param->cond, &param->mutex);
        }
    }
    qemu_mutex_unlock(&param->mutex);

    return NULL;
}
2240
2241static void wait_for_decompress_done(void)
2242{
2243 int idx, thread_count;
2244
2245 if (!migrate_use_compression()) {
2246 return;
2247 }
2248
2249 thread_count = migrate_decompress_threads();
2250 qemu_mutex_lock(&decomp_done_lock);
2251 for (idx = 0; idx < thread_count; idx++) {
2252 while (!decomp_param[idx].done) {
2253 qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
2254 }
2255 }
2256 qemu_mutex_unlock(&decomp_done_lock);
2257}
2258
2259void migrate_decompress_threads_create(void)
2260{
2261 int i, thread_count;
2262
2263 thread_count = migrate_decompress_threads();
2264 decompress_threads = g_new0(QemuThread, thread_count);
2265 decomp_param = g_new0(DecompressParam, thread_count);
2266 qemu_mutex_init(&decomp_done_lock);
2267 qemu_cond_init(&decomp_done_cond);
2268 for (i = 0; i < thread_count; i++) {
2269 qemu_mutex_init(&decomp_param[i].mutex);
2270 qemu_cond_init(&decomp_param[i].cond);
2271 decomp_param[i].compbuf = g_malloc0(compressBound(TARGET_PAGE_SIZE));
2272 decomp_param[i].done = true;
2273 decomp_param[i].quit = false;
2274 qemu_thread_create(decompress_threads + i, "decompress",
2275 do_data_decompress, decomp_param + i,
2276 QEMU_THREAD_JOINABLE);
2277 }
2278}
2279
2280void migrate_decompress_threads_join(void)
2281{
2282 int i, thread_count;
2283
2284 thread_count = migrate_decompress_threads();
2285 for (i = 0; i < thread_count; i++) {
2286 qemu_mutex_lock(&decomp_param[i].mutex);
2287 decomp_param[i].quit = true;
2288 qemu_cond_signal(&decomp_param[i].cond);
2289 qemu_mutex_unlock(&decomp_param[i].mutex);
2290 }
2291 for (i = 0; i < thread_count; i++) {
2292 qemu_thread_join(decompress_threads + i);
2293 qemu_mutex_destroy(&decomp_param[i].mutex);
2294 qemu_cond_destroy(&decomp_param[i].cond);
2295 g_free(decomp_param[i].compbuf);
2296 }
2297 g_free(decompress_threads);
2298 g_free(decomp_param);
2299 decompress_threads = NULL;
2300 decomp_param = NULL;
2301}
2302
/*
 * Hand one compressed page to an idle decompression worker, blocking on
 * decomp_done_cond until a worker is free.  The compressed bytes are
 * read from the stream into the worker's compbuf, the destination and
 * length are published, and the worker is signalled.
 */
static void decompress_data_with_multi_threads(QEMUFile *f,
                                               void *host, int len)
{
    int idx, thread_count;

    thread_count = migrate_decompress_threads();
    qemu_mutex_lock(&decomp_done_lock);
    while (true) {
        /* Look for a worker that has finished its previous page */
        for (idx = 0; idx < thread_count; idx++) {
            if (decomp_param[idx].done) {
                /* Claim it: 'done' is protected by decomp_done_lock */
                decomp_param[idx].done = false;
                /* Fill in the job under the worker's own mutex */
                qemu_mutex_lock(&decomp_param[idx].mutex);
                qemu_get_buffer(f, decomp_param[idx].compbuf, len);
                decomp_param[idx].des = host;
                decomp_param[idx].len = len;
                qemu_cond_signal(&decomp_param[idx].cond);
                qemu_mutex_unlock(&decomp_param[idx].mutex);
                break;
            }
        }
        if (idx < thread_count) {
            /* Dispatched to worker 'idx' */
            break;
        } else {
            /* All busy: sleep until some worker signals completion */
            qemu_cond_wait(&decomp_done_cond, &decomp_done_lock);
        }
    }
    qemu_mutex_unlock(&decomp_done_lock);
}
2331
2332
2333
2334
2335
2336int ram_postcopy_incoming_init(MigrationIncomingState *mis)
2337{
2338 size_t ram_pages = last_ram_offset() >> TARGET_PAGE_BITS;
2339
2340 return postcopy_ram_incoming_init(mis, ram_pages);
2341}
2342
2343
2344
2345
2346
/*
 * Incoming page loader for the postcopy phase.  Pages are staged into a
 * temporary buffer one host page at a time (a host page may span several
 * target pages) and then atomically "placed" into guest memory, so the
 * guest never sees a partially-filled host page.  Target pages within a
 * host page must arrive sequentially.
 *
 * Returns 0 on success, negative errno on a malformed stream.
 */
static int ram_load_postcopy(QEMUFile *f)
{
    int flags = 0, ret = 0;
    bool place_needed = false;
    /* When host == target page size, pages can be read in place */
    bool matching_page_sizes = qemu_host_page_size == TARGET_PAGE_SIZE;
    MigrationIncomingState *mis = migration_incoming_get_current();

    /* Staging buffer, one host page, into which target pages accumulate */
    void *postcopy_host_page = postcopy_get_tmp_page(mis);
    /* Host address of the previously loaded target page (for the
     * sequentiality check) */
    void *last_host = NULL;
    /* True while every byte staged for the current host page is zero */
    bool all_zero = false;

    while (!ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr;
        void *host = NULL;
        void *page_buffer = NULL;
        void *place_source = NULL;
        uint8_t ch;

        /* Header word: page address with flags in the low bits */
        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        trace_ram_load_postcopy_loop((uint64_t)addr, flags);
        place_needed = false;
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }

            /*
             * Stage into the slot of the temp page corresponding to
             * this target page's offset within its host page.
             */
            page_buffer = postcopy_host_page +
                          ((uintptr_t)host & ~qemu_host_page_mask);
            /* First target page of a host page? Reset the zero tracker */
            if (!((uintptr_t)host & ~qemu_host_page_mask)) {
                all_zero = true;
            } else {
                /* Later target pages must follow the previous one */
                if (host != (last_host + TARGET_PAGE_SIZE)) {
                    error_report("Non-sequential target page %p/%p",
                                  host, last_host);
                    ret = -EINVAL;
                    break;
                }
            }

            /*
             * If this is the last target page of the host page, the
             * whole host page can be placed after this iteration.
             */
            place_needed = (((uintptr_t)host + TARGET_PAGE_SIZE) &
                             ~qemu_host_page_mask) == 0;
            place_source = postcopy_host_page;
        }
        last_host = host;

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_COMPRESS:
            /* A whole target page filled with a single byte value */
            ch = qemu_get_byte(f);
            memset(page_buffer, ch, TARGET_PAGE_SIZE);
            if (ch) {
                all_zero = false;
            }
            break;

        case RAM_SAVE_FLAG_PAGE:
            all_zero = false;
            if (!place_needed || !matching_page_sizes) {
                /* Copy into the staging buffer */
                qemu_get_buffer(f, page_buffer, TARGET_PAGE_SIZE);
            } else {
                /*
                 * Matching page sizes and about to place anyway: let
                 * the QEMUFile hand us its internal buffer directly.
                 */
                qemu_get_buffer_in_place(f, (uint8_t **)&place_source,
                                         TARGET_PAGE_SIZE);
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* End of stream marker: loop condition will terminate */
            break;
        default:
            error_report("Unknown combination of migration flags: %#x"
                         " (postcopy mode)", flags);
            ret = -EINVAL;
        }

        if (place_needed) {
            /* Place the staged host page; 'host' points at its last
             * target page, so step back to the host-page start. */
            if (all_zero) {
                ret = postcopy_place_page_zero(mis,
                                               host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size);
            } else {
                ret = postcopy_place_page(mis, host + TARGET_PAGE_SIZE -
                                               qemu_host_page_size,
                                          place_source);
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    return ret;
}
2464
/*
 * Main incoming RAM loader.  Parses the stream produced by the save
 * path: a sequence of (addr | flags) words each followed by flag-specific
 * payload, terminated by RAM_SAVE_FLAG_EOS.  Once postcopy listening has
 * started, delegates to ram_load_postcopy() instead.
 *
 * Returns 0 on success, negative errno on error.
 */
static int ram_load(QEMUFile *f, void *opaque, int version_id)
{
    int flags = 0, ret = 0;
    static uint64_t seq_iter;
    int len = 0;

    /*
     * Past POSTCOPY_INCOMING_LISTENING the postcopy loader owns the
     * stream; the precopy loop below is skipped entirely.
     */
    bool postcopy_running = postcopy_state_get() >= POSTCOPY_INCOMING_LISTENING;

    seq_iter++;

    /* Only stream version 4 is understood */
    if (version_id != 4) {
        ret = -EINVAL;
    }

    /*
     * RCU protects the RAMBlock list while we resolve block names and
     * write into guest memory.
     */
    rcu_read_lock();

    if (postcopy_running) {
        ret = ram_load_postcopy(f);
    }

    while (!postcopy_running && !ret && !(flags & RAM_SAVE_FLAG_EOS)) {
        ram_addr_t addr, total_ram_bytes;
        void *host = NULL;
        uint8_t ch;

        /* Header word: page address with flags in the low bits */
        addr = qemu_get_be64(f);
        flags = addr & ~TARGET_PAGE_MASK;
        addr &= TARGET_PAGE_MASK;

        /* Page-carrying records need the destination host address */
        if (flags & (RAM_SAVE_FLAG_COMPRESS | RAM_SAVE_FLAG_PAGE |
                     RAM_SAVE_FLAG_COMPRESS_PAGE | RAM_SAVE_FLAG_XBZRLE)) {
            RAMBlock *block = ram_block_from_stream(f, flags);

            host = host_from_ram_block_offset(block, addr);
            if (!host) {
                error_report("Illegal RAM offset " RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
        }

        switch (flags & ~RAM_SAVE_FLAG_CONTINUE) {
        case RAM_SAVE_FLAG_MEM_SIZE:
            /* Setup record: the table of block names and lengths */
            total_ram_bytes = addr;
            while (!ret && total_ram_bytes) {
                RAMBlock *block;
                char id[256];
                ram_addr_t length;

                len = qemu_get_byte(f);
                qemu_get_buffer(f, (uint8_t *)id, len);
                id[len] = 0;
                length = qemu_get_be64(f);

                block = qemu_ram_block_by_name(id);
                if (block) {
                    if (length != block->used_length) {
                        /* Try to match the source's block size */
                        Error *local_err = NULL;

                        ret = qemu_ram_resize(block, length,
                                              &local_err);
                        if (local_err) {
                            error_report_err(local_err);
                        }
                    }
                    ram_control_load_hook(f, RAM_CONTROL_BLOCK_REG,
                                          block->idstr);
                } else {
                    error_report("Unknown ramblock \"%s\", cannot "
                                 "accept migration", id);
                    ret = -EINVAL;
                }

                total_ram_bytes -= length;
            }
            break;

        case RAM_SAVE_FLAG_COMPRESS:
            /* Whole page filled with one byte value */
            ch = qemu_get_byte(f);
            ram_handle_compressed(host, ch, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_PAGE:
            /* Raw full page */
            qemu_get_buffer(f, host, TARGET_PAGE_SIZE);
            break;

        case RAM_SAVE_FLAG_COMPRESS_PAGE:
            /* zlib-compressed page: validate length before reading */
            len = qemu_get_be32(f);
            if (len < 0 || len > compressBound(TARGET_PAGE_SIZE)) {
                error_report("Invalid compressed data length: %d", len);
                ret = -EINVAL;
                break;
            }
            decompress_data_with_multi_threads(f, host, len);
            break;

        case RAM_SAVE_FLAG_XBZRLE:
            if (load_xbzrle(f, addr, host) < 0) {
                error_report("Failed to decompress XBZRLE page at "
                             RAM_ADDR_FMT, addr);
                ret = -EINVAL;
                break;
            }
            break;
        case RAM_SAVE_FLAG_EOS:
            /* End of stream marker: loop condition will terminate */
            break;
        default:
            if (flags & RAM_SAVE_FLAG_HOOK) {
                /* e.g. RDMA registration hook */
                ram_control_load_hook(f, RAM_CONTROL_HOOK, NULL);
            } else {
                error_report("Unknown combination of migration flags: %#x",
                             flags);
                ret = -EINVAL;
            }
        }
        if (!ret) {
            ret = qemu_file_get_error(f);
        }
    }

    /* Make sure outstanding decompression jobs land before returning */
    wait_for_decompress_done();
    rcu_read_unlock();
    DPRINTF("Completed load of VM with exit code %d seq iteration "
            "%" PRIu64 "\n", ret, seq_iter);
    return ret;
}
2601
/* Live-migration handlers for the "ram" section. */
static SaveVMHandlers savevm_ram_handlers = {
    .save_live_setup = ram_save_setup,
    .save_live_iterate = ram_save_iterate,
    /* The same completion routine serves both precopy and postcopy */
    .save_live_complete_postcopy = ram_save_complete,
    .save_live_complete_precopy = ram_save_complete,
    .save_live_pending = ram_save_pending,
    .load_state = ram_load,
    .cleanup = ram_migration_cleanup,
};
2611
/* Register the "ram" live-migration section and init the XBZRLE lock. */
void ram_mig_init(void)
{
    qemu_mutex_init(&XBZRLE.lock);
    /* instance_id 0, stream version 4 (checked in ram_load()) */
    register_savevm_live(NULL, "ram", 0, 4, &savevm_ram_handlers, NULL);
}
2617