qemu/migration/block.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"

#define BLOCK_SIZE                       (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK     (BLOCK_SIZE >> BDRV_SECTOR_BITS)
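/* With 512-byte sectors this works out to 2048 sectors per 1 MiB chunk. */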

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_IO_BUFFERS 512
#define MAX_PARALLEL_IO 16

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by block migration lock.
     * Allocation and free happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by iothread lock / AioContext.
     * Allocation and free happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    struct iovec iov;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(bmds_list, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(blk_list, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

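/* On-wire format of one device block, as emitted by blk_send() below:
 *
 *   be64:      (sector number << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_* flags
 *   byte:      length of the device name
 *   len bytes: device name
 *   BLOCK_SIZE bytes of data, unless BLK_MIG_FLAG_ZERO_BLOCK is set
 */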
static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is zero we need to flush here since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Thus, if we queue zero blocks we slow down the migration. */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}

/* Called with migration lock held.  */

static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;
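    /* One bit per chunk: with 2048 sectors per chunk and 64-bit longs,
     * e.g. sector 5000 falls in chunk 2, i.e. word 0, bit 2 below. */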

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

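    /* One bit per dirty chunk, rounded up to whole bytes (8 chunks per
     * byte of bitmap). */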
    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

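    /* Round down to the start of the chunk containing cur_sector. */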
    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    blk->iov.iov_base = blk->buf;
    blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
    qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

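    /* One dirty bitmap per device, with BLOCK_SIZE granularity so that
     * one dirty bit corresponds to one migration chunk. */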
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLOCK_SIZE, NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(blk_bs(bmds->blk), bmds->dirty_bitmap);
    }
}

static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken.  */

static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    BlockDriverState *bs = blk_bs(bmds->blk);
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
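        /* If an AIO read is still in flight for this chunk, wait for
         * all pending requests on the device to complete before
         * reading it again. */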
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_get_dirty_locked(bs, bmds->dirty_bitmap,
                                  sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                blk->iov.iov_base = blk->buf;
                blk->iov.iov_len = nr_sectors * BDRV_SECTOR_SIZE;
                qemu_iovec_init_external(&blk->qiov, &blk->iov, 1);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty;
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    AioContext *ctx;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
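    /* Keep submitting while the completed-but-unsent data stays under
     * the bandwidth budget, fewer than MAX_PARALLEL_IO reads are in
     * flight, and no more than MAX_IO_BUFFERS buffers are held. */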
    blk_mig_lock();
    while (block_mig_state.read_done * BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           block_mig_state.submitted < MAX_PARALLEL_IO &&
           (block_mig_state.submitted + block_mig_state.read_done) <
           MAX_IO_BUFFERS) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
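    /* Infer from the file offset whether this pass queued any data:
     * a positive delta means there may be more to send, zero means we
     * were idle, and a negative delta is reported as an error. */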
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* We know for sure that save bulk is completed and
       all async reads have completed */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

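    /* The guest is stopped during precopy completion, so the dirty
     * bitmap can no longer grow; drain it synchronously. */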
    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated. */
    block_migration_cleanup_bmds();

    return 0;
}

static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *res_precopy_only,
                               uint64_t *res_compatible,
                               uint64_t *res_postcopy_only)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLOCK_SIZE +
               block_mig_state.read_done * BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *res_precopy_only += pending;
}

static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & ~BDRV_SECTOR_MASK;
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

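                /* Prefer the destination's cluster size for the zero
                 * detection below, so that all-zero clusters inside a
                 * nonzero 1 MiB block can still be written sparsely. */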
                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLOCK_SIZE &&
                    BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLOCK_SIZE);
                for (i = 0; i < BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                        cluster_size < BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live(NULL, "block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}