qemu/block-migration.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#include "qemu-common.h"
#include "block_int.h"
#include "hw/hw.h"
#include "qemu-queue.h"
#include "qemu-timer.h"
#include "monitor.h"
#include "block-migration.h"
#include "migration.h"
#include "blockdev.h"
#include <assert.h>

#define BLOCK_SIZE (BDRV_SECTORS_PER_DIRTY_CHUNK << BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04

#define MAX_IS_ALLOCATED_SEARCH 65536

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    BlockDriverState *bs;
    int bulk_completed;
    int shared_base;
    int64_t cur_sector;
    int64_t cur_dirty;
    int64_t completed_sectors;
    int64_t total_sectors;
    int64_t dirty;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    unsigned long *aio_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockDriverAIOCB *aiocb;
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    int blk_enable;
    int shared_base;
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;
    int transferred;
    int64_t total_sector_sum;
    int prev_progress;
    int bulk_completed;
    long double total_time;
    long double prev_time_offset;
    int reads;
} BlkMigState;

static BlkMigState block_mig_state;

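/* Write one block to the migration stream: a header with the sector
 * number and flags, the length-prefixed device name, and then the
 * BLOCK_SIZE payload. */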
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | BLK_MIG_FLAG_DEVICE_BLOCK);

    /* device name */
    len = strlen(blk->bmds->bs->device_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *)blk->bmds->bs->device_name, len);

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

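/* Progress accounting helpers, exposed to the rest of the migration
 * code to report how much block data has been sent and how much
 * remains. */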
int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

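/* Average read bandwidth (bytes per nanosecond), derived from the
 * number of completed chunk reads and the accumulated read time. */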
static inline long double compute_read_bwidth(void)
{
    assert(block_mig_state.total_time != 0);
    return (block_mig_state.reads / block_mig_state.total_time) * BLOCK_SIZE;
}

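/* Return whether the chunk containing the given sector still has an
 * asynchronous read in flight, according to the device's aio bitmap. */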
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if ((sector << BDRV_SECTOR_BITS) < bdrv_getlength(bmds->bs)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

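/* Set or clear the in-flight bit for every chunk touched by the
 * sector range [sector_num, sector_num + nb_sectors). */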
static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

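/* Allocate a zeroed bitmap with one bit per dirty chunk of the device. */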
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockDriverState *bs = bmds->bs;
    int64_t bitmap_size;

    bitmap_size = (bdrv_getlength(bs) >> BDRV_SECTOR_BITS) +
            BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

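/* Completion callback for asynchronous chunk reads: update the read
 * statistics, queue the block for sending, and clear its in-flight bit. */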
static void blk_mig_read_cb(void *opaque, int ret)
{
    long double curr_time = qemu_get_clock_ns(rt_clock);
    BlkMigBlock *blk = opaque;

    blk->ret = ret;

    block_mig_state.reads++;
    block_mig_state.total_time += (curr_time - block_mig_state.prev_time_offset);
    block_mig_state.prev_time_offset = curr_time;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
}

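/* Submit an asynchronous read for the next bulk chunk of the device,
 * skipping unallocated regions when migrating on top of a shared base
 * image.  Returns 1 once the bulk phase for this device is finished. */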
static int mig_save_device_bulk(Monitor *mon, QEMUFile *f,
                                BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockDriverState *bs = bmds->bs;
    BlkMigBlock *blk;
    int nr_sectors;

    if (bmds->shared_base) {
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(bs, cur_sector, MAX_IS_ALLOCATED_SEARCH,
                                  &nr_sectors)) {
            cur_sector += nr_sectors;
        }
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_malloc(sizeof(BlkMigBlock));
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    if (block_mig_state.submitted == 0) {
        block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
    }

    blk->aiocb = bdrv_aio_readv(bs, cur_sector, &blk->qiov,
                                nr_sectors, blk_mig_read_cb, blk);
    if (!blk->aiocb) {
        goto error;
    }
    block_mig_state.submitted++;

    bdrv_reset_dirty(bs, cur_sector, nr_sectors);
    bmds->cur_sector = cur_sector + nr_sectors;

    return (bmds->cur_sector >= total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", cur_sector);
    qemu_file_set_error(f, -EIO);
    g_free(blk->buf);
    g_free(blk);
    return 0;
}

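/* Enable or disable dirty-block tracking on every migrated device. */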
static void set_dirty_tracking(int enable)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_set_dirty_tracking(bmds->bs, enable);
    }
}

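/* bdrv_iterate() callback: register every writable block device for
 * migration, taking a drive reference and marking the device in use so
 * it cannot go away while migration runs. */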
static void init_blk_migration_it(void *opaque, BlockDriverState *bs)
{
    Monitor *mon = opaque;
    BlkMigDevState *bmds;
    int64_t sectors;

    if (!bdrv_is_read_only(bs)) {
        sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
        if (sectors <= 0) {
            return;
        }

        bmds = g_malloc0(sizeof(BlkMigDevState));
        bmds->bs = bs;
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = block_mig_state.shared_base;
        alloc_aio_bitmap(bmds);
        drive_get_ref(drive_get_by_blockdev(bs));
        bdrv_set_in_use(bs, 1);

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            monitor_printf(mon, "Start migration for %s with shared base "
                                "image\n",
                           bs->device_name);
        } else {
            monitor_printf(mon, "Start full migration for %s\n",
                           bs->device_name);
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }
}

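/* Reset the global migration state and enumerate the block devices. */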
static void init_blk_migration(Monitor *mon, QEMUFile *f)
{
    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.total_time = 0;
    block_mig_state.reads = 0;

    bdrv_iterate(init_blk_migration_it, mon);
}

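/* Advance the bulk phase by one chunk on the first device that still
 * has bulk work left, and emit a progress marker when the percentage
 * changes.  Returns 0 once all devices have completed their bulk phase. */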
static int blk_mig_save_bulked_block(Monitor *mon, QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(mon, f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        monitor_printf(mon, "Completed %d %%\r", progress);
        monitor_flush(mon);
    }

    return ret;
}

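/* Rewind every device's dirty-scan cursor to the start of the device. */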
static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

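/* Scan the device from its dirty cursor for the next dirty chunk and
 * transfer it, asynchronously or synchronously depending on is_async.
 * Returns 1 when the cursor has reached the end of the device. */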
static int mig_save_device_dirty(Monitor *mon, QEMUFile *f,
                                 BlkMigDevState *bmds, int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        if (bmds_aio_inflight(bmds, sector)) {
            qemu_aio_flush();
        }
        if (bdrv_get_dirty(bmds->bs, sector)) {

            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            blk = g_malloc(sizeof(BlkMigBlock));
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                if (block_mig_state.submitted == 0) {
                    block_mig_state.prev_time_offset = qemu_get_clock_ns(rt_clock);
                }

                blk->aiocb = bdrv_aio_readv(bmds->bs, sector, &blk->qiov,
                                            nr_sectors, blk_mig_read_cb, blk);
                if (!blk->aiocb) {
                    goto error;
                }
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
            } else {
                ret = bdrv_read(bmds->bs, sector, blk->buf, nr_sectors);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            bdrv_reset_dirty(bmds->bs, sector, nr_sectors);
            break;
        }
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    monitor_printf(mon, "Error reading sector %" PRId64 "\n", sector);
    qemu_file_set_error(f, ret);
    g_free(blk->buf);
    g_free(blk);
    return 0;
}

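/* Transfer one dirty chunk per call.  Returns 1 while at least one
 * device still has dirty chunks outstanding, 0 otherwise. */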
static int blk_mig_save_dirty_block(Monitor *mon, QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (mig_save_device_dirty(mon, f, bmds, is_async) == 0) {
            ret = 1;
            break;
        }
    }

    return ret;
}

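/* Drain completed reads from blk_list into the migration stream,
 * stopping early on rate limiting or on a failed read. */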
static void flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __FUNCTION__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            qemu_file_set_error(f, blk->ret);
            break;
        }
        blk_send(f, blk);

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __FUNCTION__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
}

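/* Total number of dirty bytes still to be transferred, summed over
 * all devices. */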
static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        dirty += bdrv_get_dirty_count(bmds->bs);
    }

    return dirty * BLOCK_SIZE;
}

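/* Stage 2 is done once the bulk phase has finished and the remaining
 * dirty data is small enough to be transferred within the configured
 * maximum downtime at the measured read bandwidth. */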
static int is_stage2_completed(void)
{
    int64_t remaining_dirty;
    long double bwidth;

    if (block_mig_state.bulk_completed == 1) {

        remaining_dirty = get_remaining_dirty();
        if (remaining_dirty == 0) {
            return 1;
        }

        bwidth = compute_read_bwidth();

        if ((remaining_dirty / bwidth) <=
            migrate_max_downtime()) {
            /* finish stage 2 because we estimate that the remaining work
               can be completed within max_downtime */

            return 1;
        }
    }

    return 0;
}

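/* Stop dirty tracking, release every device, and free any pending
 * blocks that were never sent. */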
static void blk_mig_cleanup(Monitor *mon)
{
    BlkMigDevState *bmds;
    BlkMigBlock *blk;

    set_dirty_tracking(0);

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_set_in_use(bmds->bs, 0);
        drive_put_ref(drive_get_by_blockdev(bmds->bs));
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }

    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }

    monitor_printf(mon, "\n");
}

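/* Live-savevm handler.  Stage 1 sets up per-device state and enables
 * dirty tracking, stage 2 streams bulk and dirty chunks under the
 * rate limit, and stage 3 synchronously flushes the remaining dirty
 * chunks with the guest stopped. */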
static int block_save_live(Monitor *mon, QEMUFile *f, int stage, void *opaque)
{
    int ret;

    DPRINTF("Enter save live stage %d submitted %d transferred %d\n",
            stage, block_mig_state.submitted, block_mig_state.transferred);

    if (stage < 0) {
        blk_mig_cleanup(mon);
        return 0;
    }

    if (block_mig_state.blk_enable != 1) {
        /* no need to migrate storage */
        qemu_put_be64(f, BLK_MIG_FLAG_EOS);
        return 1;
    }

    if (stage == 1) {
        init_blk_migration(mon, f);

        /* start tracking dirty blocks */
        set_dirty_tracking(1);
    }

    flush_blks(f);

    ret = qemu_file_get_error(f);
    if (ret) {
        blk_mig_cleanup(mon);
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    if (stage == 2) {
        /* control the rate of transfer */
        while ((block_mig_state.submitted +
                block_mig_state.read_done) * BLOCK_SIZE <
               qemu_file_get_rate_limit(f)) {
            if (block_mig_state.bulk_completed == 0) {
                /* first finish the bulk phase */
                if (blk_mig_save_bulked_block(mon, f) == 0) {
                    /* finished saving bulk on all devices */
                    block_mig_state.bulk_completed = 1;
                }
            } else {
                if (blk_mig_save_dirty_block(mon, f, 1) == 0) {
                    /* no more dirty blocks */
                    break;
                }
            }
        }

        flush_blks(f);

        ret = qemu_file_get_error(f);
        if (ret) {
            blk_mig_cleanup(mon);
            return ret;
        }
    }

    if (stage == 3) {
        /* we know for sure that the bulk phase is completed and that
           all async reads have completed */
        assert(block_mig_state.submitted == 0);

        while (blk_mig_save_dirty_block(mon, f, 0) != 0);
        blk_mig_cleanup(mon);

        /* report completion */
        qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

        ret = qemu_file_get_error(f);
        if (ret) {
            return ret;
        }

        monitor_printf(mon, "Block migration completed\n");
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ((stage == 2) && is_stage2_completed());
}

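/* Incoming side: parse the stream written by blk_send() plus the
 * progress markers, writing each received chunk to the named device. */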
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockDriverState *bs, *bs_prev = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            bs = bdrv_find(device_name);
            if (!bs) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (bs != bs_prev) {
                bs_prev = bs;
                total_sectors = bdrv_getlength(bs) >> BDRV_SECTOR_BITS;
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            buf = g_malloc(BLOCK_SIZE);

            qemu_get_buffer(f, buf, BLOCK_SIZE);
            ret = bdrv_write(bs, addr, buf, nr_sectors);

            g_free(buf);
            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown flags\n");
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

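/* Record whether block migration and shared-base mode were requested;
 * requesting a shared base implies block migration is enabled. */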
static void block_set_params(int blk_enable, int shared_base, void *opaque)
{
    block_mig_state.blk_enable = blk_enable;
    block_mig_state.shared_base = shared_base;

    /* shared base means that blk_enable = 1 */
    block_mig_state.blk_enable |= shared_base;
}

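/* Called at startup to initialize the queues and register the "block"
 * live-savevm handlers. */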
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);

    register_savevm_live(NULL, "block", 0, 1, block_set_params,
                         block_save_live, NULL, block_load, &block_mig_state);
}