/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 *
 */
#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
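
/* Data is transferred in chunks of one BLOCK_SIZE (1 MiB) block, i.e.
 * 2048 512-byte sectors; the dirty bitmap and the in-flight AIO bitmap
 * below both work at this chunk granularity. */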

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_INFLIGHT_IO 512

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken after iothread lock.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

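/* On-wire layout of one device block, as produced below and parsed by
 * block_load(): a be64 word holding (sector << BDRV_SECTOR_BITS) | flags,
 * a one-byte device-name length followed by the name itself, and then
 * BLOCK_SIZE bytes of data, omitted entirely when the
 * BLK_MIG_FLAG_ZERO_BLOCK flag is set. */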
static void blk_send(QEMUFile *f, BlkMigBlock * blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* if a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * thus if we queue zero blocks we slow down the migration */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

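/* The two helpers below manipulate bmds->aio_bitmap, which holds one bit
 * per BDRV_SECTORS_PER_DIRTY_CHUNK-sized chunk; a set bit means an AIO
 * read of that chunk is still in flight. */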

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    /* One bit per chunk, rounded up to a whole number of bytes.  */
    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * dropping the AioContext acquire/release pair.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);

    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

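/* Each device gets a dirty bitmap with BLOCK_SIZE granularity, so guest
 * writes are tracked (and later re-sent) in the same chunks that the bulk
 * phase uses. */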
static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}
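
/* Migration runs in two phases: a bulk phase that streams every chunk
 * once (skipping unallocated areas when migrating on top of a shared
 * base image), followed by a dirty phase that keeps re-sending chunks
 * the guest wrote to until the remainder fits into the downtime budget. */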

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock taken.  */

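/* With is_async set, reads are submitted via blk_aio_preadv() and the
 * completed blocks are sent later by flush_blks(); the synchronous path
 * (used by block_save_complete) reads and sends each chunk inline. */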
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_get_dirty_locked(bs, bmds->dirty_bitmap,
                                  sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: few enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no lock taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

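/* Returns the total amount of data still marked dirty across all devices;
 * block_save_pending() uses this to estimate the remaining work. */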
static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty;
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    AioContext *ctx;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}
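
/* Returns 1 if data was written to the stream in this iteration, 0 if
 * nothing was sent (e.g. rate-limited), and a negative value on error. */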
static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while ((block_mig_state.submitted +
            block_mig_state.read_done) * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           (block_mig_state.submitted +
            block_mig_state.read_done) <
           MAX_INFLIGHT_IO) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* we know for sure that save bulk is completed and
       all async read completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated.  */
    block_migration_cleanup_bmds();

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *non_postcopiable_pending,
                               uint64_t *postcopiable_pending)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *non_postcopiable_pending += pending;
}

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                /* Prefer the destination format's cluster size for zero
                 * detection, as long as it evenly divides BLOCK_SIZE.  */
                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLOCK_SIZE &&
                    BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                         cluster_size < BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};
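
/* Registers the "block" savevm section at startup; the handlers above
 * only become active when block migration is enabled (see
 * block_is_active). */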

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}