qemu/migration/block.c
/*
 * QEMU live block migration
 *
 * Copyright IBM, Corp. 2009
 *
 * Authors:
 *  Liran Schour   <lirans@il.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/error-report.h"
#include "qemu/main-loop.h"
#include "qemu/cutils.h"
#include "qemu/queue.h"
#include "block.h"
#include "migration/misc.h"
#include "migration.h"
#include "migration/register.h"
#include "qemu-file.h"
#include "migration/vmstate.h"
#include "sysemu/block-backend.h"

#define BLK_MIG_BLOCK_SIZE           (1 << 20)
#define BDRV_SECTORS_PER_DIRTY_CHUNK (BLK_MIG_BLOCK_SIZE >> BDRV_SECTOR_BITS)

#define BLK_MIG_FLAG_DEVICE_BLOCK       0x01
#define BLK_MIG_FLAG_EOS                0x02
#define BLK_MIG_FLAG_PROGRESS           0x04
#define BLK_MIG_FLAG_ZERO_BLOCK         0x08

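/*
 * On the wire, each chunk header is a 64-bit value: the starting sector
 * shifted left by BDRV_SECTOR_BITS, with the flags above OR'ed into the
 * low bits.  With 512-byte sectors, one 1 MiB chunk covers
 * BDRV_SECTORS_PER_DIRTY_CHUNK == 2048 sectors.
 */
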
#define MAX_IS_ALLOCATED_SEARCH (65536 * BDRV_SECTOR_SIZE)

#define MAX_IO_BUFFERS 512
#define MAX_PARALLEL_IO 16

//#define DEBUG_BLK_MIGRATION

#ifdef DEBUG_BLK_MIGRATION
#define DPRINTF(fmt, ...) \
    do { printf("blk_migration: " fmt, ## __VA_ARGS__); } while (0)
#else
#define DPRINTF(fmt, ...) \
    do { } while (0)
#endif

typedef struct BlkMigDevState {
    /* Written during setup phase.  Can be read without a lock.  */
    BlockBackend *blk;
    char *blk_name;
    int shared_base;
    int64_t total_sectors;
    QSIMPLEQ_ENTRY(BlkMigDevState) entry;
    Error *blocker;

    /* Only used by migration thread.  Does not need a lock.  */
    int bulk_completed;
    int64_t cur_sector;
    int64_t cur_dirty;

    /* Data in the aio_bitmap is protected by the block migration lock.
     * Allocation and freeing happen during setup and cleanup respectively.
     */
    unsigned long *aio_bitmap;

    /* Protected by the block migration lock.  */
    int64_t completed_sectors;

    /* During migration this is protected by the iothread lock / AioContext.
     * Allocation and freeing happen during setup and cleanup respectively.
     */
    BdrvDirtyBitmap *dirty_bitmap;
} BlkMigDevState;

typedef struct BlkMigBlock {
    /* Only used by migration thread.  */
    uint8_t *buf;
    BlkMigDevState *bmds;
    int64_t sector;
    int nr_sectors;
    QEMUIOVector qiov;
    BlockAIOCB *aiocb;

    /* Protected by the block migration lock.  */
    int ret;
    QSIMPLEQ_ENTRY(BlkMigBlock) entry;
} BlkMigBlock;

typedef struct BlkMigState {
    QSIMPLEQ_HEAD(, BlkMigDevState) bmds_list;
    int64_t total_sector_sum;
    bool zero_blocks;

    /* Protected by lock.  */
    QSIMPLEQ_HEAD(, BlkMigBlock) blk_list;
    int submitted;
    int read_done;

    /* Only used by migration thread.  Does not need a lock.  */
    int transferred;
    int prev_progress;
    int bulk_completed;

    /* Lock must be taken _inside_ the iothread lock and any AioContexts.  */
    QemuMutex lock;
} BlkMigState;

static BlkMigState block_mig_state;

static void blk_mig_lock(void)
{
    qemu_mutex_lock(&block_mig_state.lock);
}

static void blk_mig_unlock(void)
{
    qemu_mutex_unlock(&block_mig_state.lock);
}

/* Must run outside of the iothread lock during the bulk phase,
 * or the VM will stall.
 */

static void blk_send(QEMUFile *f, BlkMigBlock *blk)
{
    int len;
    uint64_t flags = BLK_MIG_FLAG_DEVICE_BLOCK;

    if (block_mig_state.zero_blocks &&
        buffer_is_zero(blk->buf, BLK_MIG_BLOCK_SIZE)) {
        flags |= BLK_MIG_FLAG_ZERO_BLOCK;
    }

    /* sector number and flags */
    qemu_put_be64(f, (blk->sector << BDRV_SECTOR_BITS)
                     | flags);

    /* device name */
    len = strlen(blk->bmds->blk_name);
    qemu_put_byte(f, len);
    qemu_put_buffer(f, (uint8_t *) blk->bmds->blk_name, len);

    /* If a block is zero we need to flush here, since the network
     * bandwidth is now a lot higher than the storage device bandwidth.
     * Queuing zero blocks would therefore slow down the migration. */
    if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
        qemu_fflush(f);
        return;
    }

    qemu_put_buffer(f, blk->buf, BLK_MIG_BLOCK_SIZE);
}

int blk_mig_active(void)
{
    return !QSIMPLEQ_EMPTY(&block_mig_state.bmds_list);
}

int blk_mig_bulk_active(void)
{
    return blk_mig_active() && !block_mig_state.bulk_completed;
}

uint64_t blk_mig_bytes_transferred(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    blk_mig_lock();
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->completed_sectors;
    }
    blk_mig_unlock();
    return sum << BDRV_SECTOR_BITS;
}

uint64_t blk_mig_bytes_remaining(void)
{
    return blk_mig_bytes_total() - blk_mig_bytes_transferred();
}

uint64_t blk_mig_bytes_total(void)
{
    BlkMigDevState *bmds;
    uint64_t sum = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        sum += bmds->total_sectors;
    }
    return sum << BDRV_SECTOR_BITS;
}


/* Called with migration lock held.  */

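/*
 * Each bit of aio_bitmap covers one BDRV_SECTORS_PER_DIRTY_CHUNK-sized
 * chunk and is set while an asynchronous dirty-phase read of that chunk
 * is in flight; mig_save_device_dirty() drains the device before
 * touching a chunk whose bit is still set.
 */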
static int bmds_aio_inflight(BlkMigDevState *bmds, int64_t sector)
{
    int64_t chunk = sector / (int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (sector < blk_nb_sectors(bmds->blk)) {
        return !!(bmds->aio_bitmap[chunk / (sizeof(unsigned long) * 8)] &
            (1UL << (chunk % (sizeof(unsigned long) * 8))));
    } else {
        return 0;
    }
}

/* Called with migration lock held.  */

static void bmds_set_aio_inflight(BlkMigDevState *bmds, int64_t sector_num,
                                  int nb_sectors, int set)
{
    int64_t start, end;
    unsigned long val, idx, bit;

    start = sector_num / BDRV_SECTORS_PER_DIRTY_CHUNK;
    end = (sector_num + nb_sectors - 1) / BDRV_SECTORS_PER_DIRTY_CHUNK;

    for (; start <= end; start++) {
        idx = start / (sizeof(unsigned long) * 8);
        bit = start % (sizeof(unsigned long) * 8);
        val = bmds->aio_bitmap[idx];
        if (set) {
            val |= 1UL << bit;
        } else {
            val &= ~(1UL << bit);
        }
        bmds->aio_bitmap[idx] = val;
    }
}

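/*
 * Allocate one aio_bitmap bit per chunk of the device, rounding the
 * device size up so that a final partial chunk still gets a bit.
 */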
static void alloc_aio_bitmap(BlkMigDevState *bmds)
{
    BlockBackend *bb = bmds->blk;
    int64_t bitmap_size;

    bitmap_size = blk_nb_sectors(bb) + BDRV_SECTORS_PER_DIRTY_CHUNK * 8 - 1;
    bitmap_size /= BDRV_SECTORS_PER_DIRTY_CHUNK * 8;

    bmds->aio_bitmap = g_malloc0(bitmap_size);
}

/* Never hold migration lock when yielding to the main loop!  */

static void blk_mig_read_cb(void *opaque, int ret)
{
    BlkMigBlock *blk = opaque;

    blk_mig_lock();
    blk->ret = ret;

    QSIMPLEQ_INSERT_TAIL(&block_mig_state.blk_list, blk, entry);
    bmds_set_aio_inflight(blk->bmds, blk->sector, blk->nr_sectors, 0);

    block_mig_state.submitted--;
    block_mig_state.read_done++;
    assert(block_mig_state.submitted >= 0);
    blk_mig_unlock();
}

/* Called with no lock taken.  */

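/*
 * Read the next bulk chunk of @bmds into a freshly allocated BlkMigBlock
 * and submit it as AIO.  Returns 1 once the end of the device has been
 * reached, 0 otherwise.
 */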
static int mig_save_device_bulk(QEMUFile *f, BlkMigDevState *bmds)
{
    int64_t total_sectors = bmds->total_sectors;
    int64_t cur_sector = bmds->cur_sector;
    BlockBackend *bb = bmds->blk;
    BlkMigBlock *blk;
    int nr_sectors;
    int64_t count;

    if (bmds->shared_base) {
        qemu_mutex_lock_iothread();
        aio_context_acquire(blk_get_aio_context(bb));
        /* Skip unallocated sectors; intentionally treats failure or
         * partial sector as an allocated sector */
        while (cur_sector < total_sectors &&
               !bdrv_is_allocated(blk_bs(bb), cur_sector * BDRV_SECTOR_SIZE,
                                  MAX_IS_ALLOCATED_SEARCH, &count)) {
            if (count < BDRV_SECTOR_SIZE) {
                break;
            }
            cur_sector += count >> BDRV_SECTOR_BITS;
        }
        aio_context_release(blk_get_aio_context(bb));
        qemu_mutex_unlock_iothread();
    }

    if (cur_sector >= total_sectors) {
        bmds->cur_sector = bmds->completed_sectors = total_sectors;
        return 1;
    }

    bmds->completed_sectors = cur_sector;

    cur_sector &= ~((int64_t)BDRV_SECTORS_PER_DIRTY_CHUNK - 1);

    /* we are going to transfer a full block even if it is not allocated */
    nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;

    if (total_sectors - cur_sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
        nr_sectors = total_sectors - cur_sector;
    }

    blk = g_new(BlkMigBlock, 1);
    blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
    blk->bmds = bmds;
    blk->sector = cur_sector;
    blk->nr_sectors = nr_sectors;

    qemu_iovec_init_buf(&blk->qiov, blk->buf, nr_sectors * BDRV_SECTOR_SIZE);

    blk_mig_lock();
    block_mig_state.submitted++;
    blk_mig_unlock();

    /* We do not know if bs is under the main thread (and thus does
     * not acquire the AioContext when doing AIO) or rather under
     * dataplane.  Thus acquire both the iothread mutex and the
     * AioContext.
     *
     * This is ugly and will disappear when we make bdrv_* thread-safe,
     * without the need to acquire the AioContext.
     */
    qemu_mutex_lock_iothread();
    aio_context_acquire(blk_get_aio_context(bmds->blk));
    bdrv_reset_dirty_bitmap(bmds->dirty_bitmap, cur_sector * BDRV_SECTOR_SIZE,
                            nr_sectors * BDRV_SECTOR_SIZE);
    blk->aiocb = blk_aio_preadv(bb, cur_sector * BDRV_SECTOR_SIZE, &blk->qiov,
                                0, blk_mig_read_cb, blk);
    aio_context_release(blk_get_aio_context(bmds->blk));
    qemu_mutex_unlock_iothread();

    bmds->cur_sector = cur_sector + nr_sectors;
    return (bmds->cur_sector >= total_sectors);
}

/* Called with iothread lock taken.  */

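/*
 * Create a dirty bitmap with chunk granularity on every migrated device;
 * on failure, release any bitmaps created so far.
 */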
static int set_dirty_tracking(void)
{
    BlkMigDevState *bmds;
    int ret;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->dirty_bitmap = bdrv_create_dirty_bitmap(blk_bs(bmds->blk),
                                                      BLK_MIG_BLOCK_SIZE,
                                                      NULL, NULL);
        if (!bmds->dirty_bitmap) {
            ret = -errno;
            goto fail;
        }
    }
    return 0;

fail:
    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->dirty_bitmap) {
            bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
        }
    }
    return ret;
}

/* Called with iothread lock taken.  */

static void unset_dirty_tracking(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bdrv_release_dirty_bitmap(bmds->dirty_bitmap);
    }
}

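/*
 * Count the writable block devices, then allocate a BlockBackend and a
 * BlkMigDevState for each.  The BDSes are inserted into their
 * BlockBackends only after the iteration finishes, because inserting
 * while iterating could make bdrv_next() visit the new BDSes and
 * deadlock (see the comment below).
 */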
static int init_blk_migration(QEMUFile *f)
{
    BlockDriverState *bs;
    BlkMigDevState *bmds;
    int64_t sectors;
    BdrvNextIterator it;
    int i, num_bs = 0;
    struct {
        BlkMigDevState *bmds;
        BlockDriverState *bs;
    } *bmds_bs;
    Error *local_err = NULL;
    int ret;

    block_mig_state.submitted = 0;
    block_mig_state.read_done = 0;
    block_mig_state.transferred = 0;
    block_mig_state.total_sector_sum = 0;
    block_mig_state.prev_progress = -1;
    block_mig_state.bulk_completed = 0;
    block_mig_state.zero_blocks = migrate_zero_blocks();

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        num_bs++;
    }
    bmds_bs = g_malloc0(num_bs * sizeof(*bmds_bs));

    for (i = 0, bs = bdrv_first(&it); bs; bs = bdrv_next(&it), i++) {
        if (bdrv_is_read_only(bs)) {
            continue;
        }

        sectors = bdrv_nb_sectors(bs);
        if (sectors <= 0) {
            ret = sectors;
            bdrv_next_cleanup(&it);
            goto out;
        }

        bmds = g_new0(BlkMigDevState, 1);
        bmds->blk = blk_new(qemu_get_aio_context(),
                            BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
        bmds->blk_name = g_strdup(bdrv_get_device_name(bs));
        bmds->bulk_completed = 0;
        bmds->total_sectors = sectors;
        bmds->completed_sectors = 0;
        bmds->shared_base = migrate_use_block_incremental();

        assert(i < num_bs);
        bmds_bs[i].bmds = bmds;
        bmds_bs[i].bs = bs;

        block_mig_state.total_sector_sum += sectors;

        if (bmds->shared_base) {
            DPRINTF("Start migration for %s with shared base image\n",
                    bdrv_get_device_name(bs));
        } else {
            DPRINTF("Start full migration for %s\n", bdrv_get_device_name(bs));
        }

        QSIMPLEQ_INSERT_TAIL(&block_mig_state.bmds_list, bmds, entry);
    }

    /* Can only insert new BDSes now because doing so while iterating block
     * devices may end up in a deadlock (iterating the new BDSes, too). */
    for (i = 0; i < num_bs; i++) {
        BlkMigDevState *bmds = bmds_bs[i].bmds;
        BlockDriverState *bs = bmds_bs[i].bs;

        if (bmds) {
            ret = blk_insert_bs(bmds->blk, bs, &local_err);
            if (ret < 0) {
                error_report_err(local_err);
                goto out;
            }

            alloc_aio_bitmap(bmds);
            error_setg(&bmds->blocker, "block device is in use by migration");
            bdrv_op_block_all(bs, bmds->blocker);
        }
    }

    ret = 0;
out:
    g_free(bmds_bs);
    return ret;
}

/* Called with no lock taken.  */

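/*
 * Save one bulk chunk from the first device that still has bulk work,
 * and emit a progress marker whenever the overall percentage changes.
 * Returns 1 while any device still has bulk data left, 0 once the bulk
 * phase has finished on all devices.
 */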
static int blk_mig_save_bulked_block(QEMUFile *f)
{
    int64_t completed_sector_sum = 0;
    BlkMigDevState *bmds;
    int progress;
    int ret = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        if (bmds->bulk_completed == 0) {
            if (mig_save_device_bulk(f, bmds) == 1) {
                /* completed bulk section for this device */
                bmds->bulk_completed = 1;
            }
            completed_sector_sum += bmds->completed_sectors;
            ret = 1;
            break;
        } else {
            completed_sector_sum += bmds->completed_sectors;
        }
    }

    if (block_mig_state.total_sector_sum != 0) {
        progress = completed_sector_sum * 100 /
                   block_mig_state.total_sector_sum;
    } else {
        progress = 100;
    }
    if (progress != block_mig_state.prev_progress) {
        block_mig_state.prev_progress = progress;
        qemu_put_be64(f, (progress << BDRV_SECTOR_BITS)
                         | BLK_MIG_FLAG_PROGRESS);
        DPRINTF("Completed %d %%\r", progress);
    }

    return ret;
}

static void blk_mig_reset_dirty_cursor(void)
{
    BlkMigDevState *bmds;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        bmds->cur_dirty = 0;
    }
}

/* Called with iothread lock and AioContext taken.  */

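/*
 * Scan the dirty bitmap from bmds->cur_dirty and transfer at most one
 * dirty chunk: asynchronously when @is_async is set, synchronously
 * otherwise.  A chunk whose read is still in flight is drained first.
 * Returns 1 once the cursor has passed the end of the device, or a
 * negative errno if a synchronous read fails.
 */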
static int mig_save_device_dirty(QEMUFile *f, BlkMigDevState *bmds,
                                 int is_async)
{
    BlkMigBlock *blk;
    int64_t total_sectors = bmds->total_sectors;
    int64_t sector;
    int nr_sectors;
    int ret = -EIO;

    for (sector = bmds->cur_dirty; sector < bmds->total_sectors;) {
        blk_mig_lock();
        if (bmds_aio_inflight(bmds, sector)) {
            blk_mig_unlock();
            blk_drain(bmds->blk);
        } else {
            blk_mig_unlock();
        }
        bdrv_dirty_bitmap_lock(bmds->dirty_bitmap);
        if (bdrv_dirty_bitmap_get_locked(bmds->dirty_bitmap,
                                         sector * BDRV_SECTOR_SIZE)) {
            if (total_sectors - sector < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - sector;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }
            bdrv_reset_dirty_bitmap_locked(bmds->dirty_bitmap,
                                           sector * BDRV_SECTOR_SIZE,
                                           nr_sectors * BDRV_SECTOR_SIZE);
            bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);

            blk = g_new(BlkMigBlock, 1);
            blk->buf = g_malloc(BLK_MIG_BLOCK_SIZE);
            blk->bmds = bmds;
            blk->sector = sector;
            blk->nr_sectors = nr_sectors;

            if (is_async) {
                qemu_iovec_init_buf(&blk->qiov, blk->buf,
                                    nr_sectors * BDRV_SECTOR_SIZE);

                blk->aiocb = blk_aio_preadv(bmds->blk,
                                            sector * BDRV_SECTOR_SIZE,
                                            &blk->qiov, 0, blk_mig_read_cb,
                                            blk);

                blk_mig_lock();
                block_mig_state.submitted++;
                bmds_set_aio_inflight(bmds, sector, nr_sectors, 1);
                blk_mig_unlock();
            } else {
                ret = blk_pread(bmds->blk, sector * BDRV_SECTOR_SIZE, blk->buf,
                                nr_sectors * BDRV_SECTOR_SIZE);
                if (ret < 0) {
                    goto error;
                }
                blk_send(f, blk);

                g_free(blk->buf);
                g_free(blk);
            }

            sector += nr_sectors;
            bmds->cur_dirty = sector;
            break;
        }

        bdrv_dirty_bitmap_unlock(bmds->dirty_bitmap);
        sector += BDRV_SECTORS_PER_DIRTY_CHUNK;
        bmds->cur_dirty = sector;
    }

    return (bmds->cur_dirty >= bmds->total_sectors);

error:
    DPRINTF("Error reading sector %" PRId64 "\n", sector);
    g_free(blk->buf);
    g_free(blk);
    return ret;
}

/* Called with iothread lock taken.
 *
 * return value:
 * 0: too much data for max_downtime
 * 1: little enough data for max_downtime
 */
static int blk_mig_save_dirty_block(QEMUFile *f, int is_async)
{
    BlkMigDevState *bmds;
    int ret = 1;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        ret = mig_save_device_dirty(f, bmds, is_async);
        aio_context_release(blk_get_aio_context(bmds->blk));
        if (ret <= 0) {
            break;
        }
    }

    return ret;
}

/* Called with no locks taken.  */

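/*
 * Drain completed reads from blk_list into the migration stream,
 * stopping early when the rate limit is hit or a read reports an error.
 */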
static int flush_blks(QEMUFile *f)
{
    BlkMigBlock *blk;
    int ret = 0;

    DPRINTF("%s Enter submitted %d read_done %d transferred %d\n",
            __func__, block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        if (qemu_file_rate_limit(f)) {
            break;
        }
        if (blk->ret < 0) {
            ret = blk->ret;
            break;
        }

        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        blk_mig_unlock();
        blk_send(f, blk);
        blk_mig_lock();

        g_free(blk->buf);
        g_free(blk);

        block_mig_state.read_done--;
        block_mig_state.transferred++;
        assert(block_mig_state.read_done >= 0);
    }
    blk_mig_unlock();

    DPRINTF("%s Exit submitted %d read_done %d transferred %d\n", __func__,
            block_mig_state.submitted, block_mig_state.read_done,
            block_mig_state.transferred);
    return ret;
}

/* Called with iothread lock taken.  */

static int64_t get_remaining_dirty(void)
{
    BlkMigDevState *bmds;
    int64_t dirty = 0;

    QSIMPLEQ_FOREACH(bmds, &block_mig_state.bmds_list, entry) {
        aio_context_acquire(blk_get_aio_context(bmds->blk));
        dirty += bdrv_get_dirty_count(bmds->dirty_bitmap);
        aio_context_release(blk_get_aio_context(bmds->blk));
    }

    return dirty;
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup_bmds(void)
{
    BlkMigDevState *bmds;
    AioContext *ctx;

    unset_dirty_tracking();

    while ((bmds = QSIMPLEQ_FIRST(&block_mig_state.bmds_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.bmds_list, entry);
        bdrv_op_unblock_all(blk_bs(bmds->blk), bmds->blocker);
        error_free(bmds->blocker);

        /* Save ctx, because bmds->blk can disappear during blk_unref.  */
        ctx = blk_get_aio_context(bmds->blk);
        aio_context_acquire(ctx);
        blk_unref(bmds->blk);
        aio_context_release(ctx);

        g_free(bmds->blk_name);
        g_free(bmds->aio_bitmap);
        g_free(bmds);
    }
}

/* Called with iothread lock taken.  */
static void block_migration_cleanup(void *opaque)
{
    BlkMigBlock *blk;

    bdrv_drain_all();

    block_migration_cleanup_bmds();

    blk_mig_lock();
    while ((blk = QSIMPLEQ_FIRST(&block_mig_state.blk_list)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&block_mig_state.blk_list, entry);
        g_free(blk->buf);
        g_free(blk);
    }
    blk_mig_unlock();
}

static int block_save_setup(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live setup submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    qemu_mutex_lock_iothread();
    ret = init_blk_migration(f);
    if (ret < 0) {
        qemu_mutex_unlock_iothread();
        return ret;
    }

    /* start tracking dirty blocks */
    ret = set_dirty_tracking();

    qemu_mutex_unlock_iothread();

    if (ret) {
        return ret;
    }

    ret = flush_blks(f);
    blk_mig_reset_dirty_cursor();
    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    return ret;
}

static int block_save_iterate(QEMUFile *f, void *opaque)
{
    int ret;
    int64_t last_ftell = qemu_ftell(f);
    int64_t delta_ftell;

    DPRINTF("Enter save live iterate submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* control the rate of transfer */
    blk_mig_lock();
    while (block_mig_state.read_done * BLK_MIG_BLOCK_SIZE <
           qemu_file_get_rate_limit(f) &&
           block_mig_state.submitted < MAX_PARALLEL_IO &&
           (block_mig_state.submitted + block_mig_state.read_done) <
           MAX_IO_BUFFERS) {
        blk_mig_unlock();
        if (block_mig_state.bulk_completed == 0) {
            /* first finish the bulk phase */
            if (blk_mig_save_bulked_block(f) == 0) {
                /* finished saving bulk on all devices */
                block_mig_state.bulk_completed = 1;
            }
            ret = 0;
        } else {
            /* Always called with iothread lock taken for
             * simplicity, block_save_complete also calls it.
             */
            qemu_mutex_lock_iothread();
            ret = blk_mig_save_dirty_block(f, 1);
            qemu_mutex_unlock_iothread();
        }
        if (ret < 0) {
            return ret;
        }
        blk_mig_lock();
        if (ret != 0) {
            /* no more dirty blocks */
            break;
        }
    }
    blk_mig_unlock();

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);
    delta_ftell = qemu_ftell(f) - last_ftell;
    if (delta_ftell > 0) {
        return 1;
    } else if (delta_ftell < 0) {
        return -1;
    } else {
        return 0;
    }
}

/* Called with iothread lock taken.  */

static int block_save_complete(QEMUFile *f, void *opaque)
{
    int ret;

    DPRINTF("Enter save live complete submitted %d transferred %d\n",
            block_mig_state.submitted, block_mig_state.transferred);

    ret = flush_blks(f);
    if (ret) {
        return ret;
    }

    blk_mig_reset_dirty_cursor();

    /* We know for sure that the bulk save has completed and that all
     * asynchronous reads have finished.  */
    blk_mig_lock();
    assert(block_mig_state.submitted == 0);
    blk_mig_unlock();

    do {
        ret = blk_mig_save_dirty_block(f, 0);
        if (ret < 0) {
            return ret;
        }
    } while (ret == 0);

    /* report completion */
    qemu_put_be64(f, (100 << BDRV_SECTOR_BITS) | BLK_MIG_FLAG_PROGRESS);

    DPRINTF("Block migration completed\n");

    qemu_put_be64(f, BLK_MIG_FLAG_EOS);

    /* Make sure that our BlockBackends are gone, so that the block driver
     * nodes can be inactivated. */
    block_migration_cleanup_bmds();

    return 0;
}

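/*
 * Estimate the number of bytes still to be sent: the dirty-bitmap
 * contents plus any buffers that are submitted or read but not yet
 * flushed.  During the bulk phase the estimate is forced above
 * @max_size so that migration does not converge before the bulk
 * transfer has finished.
 */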
static void block_save_pending(QEMUFile *f, void *opaque, uint64_t max_size,
                               uint64_t *res_precopy_only,
                               uint64_t *res_compatible,
                               uint64_t *res_postcopy_only)
{
    /* Estimate pending number of bytes to send */
    uint64_t pending;

    qemu_mutex_lock_iothread();
    pending = get_remaining_dirty();
    qemu_mutex_unlock_iothread();

    blk_mig_lock();
    pending += block_mig_state.submitted * BLK_MIG_BLOCK_SIZE +
               block_mig_state.read_done * BLK_MIG_BLOCK_SIZE;
    blk_mig_unlock();

    /* Report at least one block pending during bulk phase */
    if (pending <= max_size && !block_mig_state.bulk_completed) {
        pending = max_size + BLK_MIG_BLOCK_SIZE;
    }

    DPRINTF("Enter save live pending  %" PRIu64 "\n", pending);
    /* We don't do postcopy */
    *res_precopy_only += pending;
}

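/*
 * Incoming side: parse chunks from the stream until the EOS marker.
 * Each DEVICE_BLOCK chunk carries the device name followed either by
 * literal data or, when the ZERO_BLOCK flag is set, by no payload at
 * all.  Data is written cluster by cluster so that all-zero clusters
 * can be punched with blk_pwrite_zeroes().
 */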
static int block_load(QEMUFile *f, void *opaque, int version_id)
{
    static int banner_printed;
    int len, flags;
    char device_name[256];
    int64_t addr;
    BlockBackend *blk, *blk_prev = NULL;
    Error *local_err = NULL;
    uint8_t *buf;
    int64_t total_sectors = 0;
    int nr_sectors;
    int ret;
    BlockDriverInfo bdi;
    int cluster_size = BLK_MIG_BLOCK_SIZE;

    do {
        addr = qemu_get_be64(f);

        flags = addr & (BDRV_SECTOR_SIZE - 1);
        addr >>= BDRV_SECTOR_BITS;

        if (flags & BLK_MIG_FLAG_DEVICE_BLOCK) {
            /* get device name */
            len = qemu_get_byte(f);
            qemu_get_buffer(f, (uint8_t *)device_name, len);
            device_name[len] = '\0';

            blk = blk_by_name(device_name);
            if (!blk) {
                fprintf(stderr, "Error unknown block device %s\n",
                        device_name);
                return -EINVAL;
            }

            if (blk != blk_prev) {
                blk_prev = blk;
                total_sectors = blk_nb_sectors(blk);
                if (total_sectors <= 0) {
                    error_report("Error getting length of block device %s",
                                 device_name);
                    return -EINVAL;
                }

                blk_invalidate_cache(blk, &local_err);
                if (local_err) {
                    error_report_err(local_err);
                    return -EINVAL;
                }

                ret = bdrv_get_info(blk_bs(blk), &bdi);
                if (ret == 0 && bdi.cluster_size > 0 &&
                    bdi.cluster_size <= BLK_MIG_BLOCK_SIZE &&
                    BLK_MIG_BLOCK_SIZE % bdi.cluster_size == 0) {
                    cluster_size = bdi.cluster_size;
                } else {
                    cluster_size = BLK_MIG_BLOCK_SIZE;
                }
            }

            if (total_sectors - addr < BDRV_SECTORS_PER_DIRTY_CHUNK) {
                nr_sectors = total_sectors - addr;
            } else {
                nr_sectors = BDRV_SECTORS_PER_DIRTY_CHUNK;
            }

            if (flags & BLK_MIG_FLAG_ZERO_BLOCK) {
                ret = blk_pwrite_zeroes(blk, addr * BDRV_SECTOR_SIZE,
                                        nr_sectors * BDRV_SECTOR_SIZE,
                                        BDRV_REQ_MAY_UNMAP);
            } else {
                int i;
                int64_t cur_addr;
                uint8_t *cur_buf;

                buf = g_malloc(BLK_MIG_BLOCK_SIZE);
                qemu_get_buffer(f, buf, BLK_MIG_BLOCK_SIZE);
                for (i = 0; i < BLK_MIG_BLOCK_SIZE / cluster_size; i++) {
                    cur_addr = addr * BDRV_SECTOR_SIZE + i * cluster_size;
                    cur_buf = buf + i * cluster_size;

                    if ((!block_mig_state.zero_blocks ||
                        cluster_size < BLK_MIG_BLOCK_SIZE) &&
                        buffer_is_zero(cur_buf, cluster_size)) {
                        ret = blk_pwrite_zeroes(blk, cur_addr,
                                                cluster_size,
                                                BDRV_REQ_MAY_UNMAP);
                    } else {
                        ret = blk_pwrite(blk, cur_addr, cur_buf,
                                         cluster_size, 0);
                    }
                    if (ret < 0) {
                        break;
                    }
                }
                g_free(buf);
            }

            if (ret < 0) {
                return ret;
            }
        } else if (flags & BLK_MIG_FLAG_PROGRESS) {
            if (!banner_printed) {
                printf("Receiving block device images\n");
                banner_printed = 1;
            }
            printf("Completed %d %%%c", (int)addr,
                   (addr == 100) ? '\n' : '\r');
            fflush(stdout);
        } else if (!(flags & BLK_MIG_FLAG_EOS)) {
            fprintf(stderr, "Unknown block migration flags: %#x\n", flags);
            return -EINVAL;
        }
        ret = qemu_file_get_error(f);
        if (ret != 0) {
            return ret;
        }
    } while (!(flags & BLK_MIG_FLAG_EOS));

    return 0;
}

static bool block_is_active(void *opaque)
{
    return migrate_use_block();
}

static SaveVMHandlers savevm_block_handlers = {
    .save_setup = block_save_setup,
    .save_live_iterate = block_save_iterate,
    .save_live_complete_precopy = block_save_complete,
    .save_live_pending = block_save_pending,
    .load_state = block_load,
    .save_cleanup = block_migration_cleanup,
    .is_active = block_is_active,
};

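/*
 * Register the "block" savevm section; the handlers above only become
 * active when block migration has been enabled (see block_is_active()).
 */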
void blk_mig_init(void)
{
    QSIMPLEQ_INIT(&block_mig_state.bmds_list);
    QSIMPLEQ_INIT(&block_mig_state.blk_list);
    qemu_mutex_init(&block_mig_state.lock);

    register_savevm_live("block", 0, 1, &savevm_block_handlers,
                         &block_mig_state);
}