qemu/block/backup.c
/*
 * QEMU backup
 *
 * Copyright (C) 2013 Proxmox Server Solutions
 *
 * Authors:
 *  Dietmar Maurer (dietmar@proxmox.com)
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 *
 */

#include "qemu/osdep.h"

#include "trace.h"
#include "block/block.h"
#include "block/block_int.h"
#include "block/blockjob.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/cutils.h"
#include "sysemu/block-backend.h"
#include "qemu/bitmap.h"

#define BACKUP_CLUSTER_SIZE_DEFAULT (1 << 16)
#define SLICE_TIME 100000000ULL /* ns */

typedef struct CowRequest {
    int64_t start;
    int64_t end;
    QLIST_ENTRY(CowRequest) list;
    CoQueue wait_queue; /* coroutines blocked on this request */
} CowRequest;

typedef struct BackupBlockJob {
    BlockJob common;
    BlockDriverState *target;
    /* bitmap for sync=incremental */
    BdrvDirtyBitmap *sync_bitmap;
    MirrorSyncMode sync_mode;
    RateLimit limit;
    BlockdevOnError on_source_error;
    BlockdevOnError on_target_error;
    CoRwlock flush_rwlock;
    uint64_t sectors_read;
    unsigned long *done_bitmap;
    int64_t cluster_size;
    QLIST_HEAD(, CowRequest) inflight_reqs;
} BackupBlockJob;

/* Size of a cluster in sectors, instead of bytes. */
static inline int64_t cluster_size_sectors(BackupBlockJob *job)
{
    return job->cluster_size / BDRV_SECTOR_SIZE;
}

/* See if in-flight requests overlap and wait for them to complete */
static void coroutine_fn wait_for_overlapping_requests(BackupBlockJob *job,
                                                       int64_t start,
                                                       int64_t end)
{
    CowRequest *req;
    bool retry;

    do {
        retry = false;
        QLIST_FOREACH(req, &job->inflight_reqs, list) {
            if (end > req->start && start < req->end) {
                qemu_co_queue_wait(&req->wait_queue);
                retry = true;
                break;
            }
        }
    } while (retry);
}

/* Keep track of an in-flight request */
static void cow_request_begin(CowRequest *req, BackupBlockJob *job,
                              int64_t start, int64_t end)
{
    req->start = start;
    req->end = end;
    qemu_co_queue_init(&req->wait_queue);
    QLIST_INSERT_HEAD(&job->inflight_reqs, req, list);
}

/* Forget about a completed request */
static void cow_request_end(CowRequest *req)
{
    QLIST_REMOVE(req, list);
    qemu_co_queue_restart_all(&req->wait_queue);
}

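/* Copy the clusters covering [sector_num, sector_num + nb_sectors) from the
 * source to the target through a bounce buffer, skipping clusters already
 * marked in done_bitmap.  Overlapping copies are serialised via
 * inflight_reqs; the rdlock pairs with the final flush in backup_run(). */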
static int coroutine_fn backup_do_cow(BlockDriverState *bs,
                                      int64_t sector_num, int nb_sectors,
                                      bool *error_is_read,
                                      bool is_write_notifier)
{
    BackupBlockJob *job = (BackupBlockJob *)bs->job;
    CowRequest cow_request;
    struct iovec iov;
    QEMUIOVector bounce_qiov;
    void *bounce_buffer = NULL;
    int ret = 0;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int64_t start, end;
    int n;

    qemu_co_rwlock_rdlock(&job->flush_rwlock);

    start = sector_num / sectors_per_cluster;
    end = DIV_ROUND_UP(sector_num + nb_sectors, sectors_per_cluster);

    trace_backup_do_cow_enter(job, start, sector_num, nb_sectors);

    wait_for_overlapping_requests(job, start, end);
    cow_request_begin(&cow_request, job, start, end);

    for (; start < end; start++) {
        if (test_bit(start, job->done_bitmap)) {
            trace_backup_do_cow_skip(job, start);
            continue; /* already copied */
        }

        trace_backup_do_cow_process(job, start);

        n = MIN(sectors_per_cluster,
                job->common.len / BDRV_SECTOR_SIZE -
                start * sectors_per_cluster);

        if (!bounce_buffer) {
            bounce_buffer = qemu_blockalign(bs, job->cluster_size);
        }
        iov.iov_base = bounce_buffer;
        iov.iov_len = n * BDRV_SECTOR_SIZE;
        qemu_iovec_init_external(&bounce_qiov, &iov, 1);

        if (is_write_notifier) {
            ret = bdrv_co_readv_no_serialising(bs,
                                           start * sectors_per_cluster,
                                           n, &bounce_qiov);
        } else {
            ret = bdrv_co_readv(bs, start * sectors_per_cluster, n,
                                &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_read_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = true;
            }
            goto out;
        }

        if (buffer_is_zero(iov.iov_base, iov.iov_len)) {
            ret = bdrv_co_write_zeroes(job->target,
                                       start * sectors_per_cluster,
                                       n, BDRV_REQ_MAY_UNMAP);
        } else {
            ret = bdrv_co_writev(job->target,
                                 start * sectors_per_cluster, n,
                                 &bounce_qiov);
        }
        if (ret < 0) {
            trace_backup_do_cow_write_fail(job, start, ret);
            if (error_is_read) {
                *error_is_read = false;
            }
            goto out;
        }

        set_bit(start, job->done_bitmap);

        /* Publish progress; guest I/O counts as progress too.  Note that the
         * offset field is an opaque progress value, not a disk offset.
         */
        job->sectors_read += n;
        job->common.offset += n * BDRV_SECTOR_SIZE;
    }

out:
    if (bounce_buffer) {
        qemu_vfree(bounce_buffer);
    }

    cow_request_end(&cow_request);

    trace_backup_do_cow_return(job, sector_num, nb_sectors, ret);

    qemu_co_rwlock_unlock(&job->flush_rwlock);

    return ret;
}

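/* Invoked before each guest write to the source while the job runs: copy
 * out the affected clusters first, so the backup still reflects the point
 * in time at which it was started. */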
static int coroutine_fn backup_before_write_notify(
        NotifierWithReturn *notifier,
        void *opaque)
{
    BdrvTrackedRequest *req = opaque;
    int64_t sector_num = req->offset >> BDRV_SECTOR_BITS;
    int nb_sectors = req->bytes >> BDRV_SECTOR_BITS;

    assert((req->offset & (BDRV_SECTOR_SIZE - 1)) == 0);
    assert((req->bytes & (BDRV_SECTOR_SIZE - 1)) == 0);

    return backup_do_cow(req->bs, sector_num, nb_sectors, NULL, true);
}

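/* Adjust the job's rate limit.  'speed' is in bytes per second; it is
 * converted to sectors to match the sectors_read accounting above. */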
static void backup_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed / BDRV_SECTOR_SIZE, SLICE_TIME);
}

static void backup_iostatus_reset(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);

    if (s->target->blk) {
        blk_iostatus_reset(s->target->blk);
    }
}

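/* Dispose of the frozen sync bitmap's successor: on success the successor
 * (which tracked writes made while the job ran) replaces the old bitmap;
 * on failure or cancellation it is merged back so nothing is lost. */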
static void backup_cleanup_sync_bitmap(BackupBlockJob *job, int ret)
{
    BdrvDirtyBitmap *bm;
    BlockDriverState *bs = job->common.bs;

    if (ret < 0 || block_job_is_cancelled(&job->common)) {
        /* Merge the successor back into the parent, delete nothing. */
        bm = bdrv_reclaim_dirty_bitmap(bs, job->sync_bitmap, NULL);
        assert(bm);
    } else {
        /* Everything is fine, delete this bitmap and install the backup. */
        bm = bdrv_dirty_bitmap_abdicate(bs, job->sync_bitmap, NULL);
        assert(bm);
    }
}

static void backup_commit(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, 0);
    }
}

static void backup_abort(BlockJob *job)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    if (s->sync_bitmap) {
        backup_cleanup_sync_bitmap(s, -1);
    }
}

static const BlockJobDriver backup_job_driver = {
    .instance_size  = sizeof(BackupBlockJob),
    .job_type       = BLOCK_JOB_TYPE_BACKUP,
    .set_speed      = backup_set_speed,
    .iostatus_reset = backup_iostatus_reset,
    .commit         = backup_commit,
    .abort          = backup_abort,
};

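/* Map an I/O error to a BlockErrorAction, honouring the on-source-error or
 * on-target-error policy depending on which side of the copy failed. */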
static BlockErrorAction backup_error_action(BackupBlockJob *job,
                                            bool read, int error)
{
    if (read) {
        return block_job_error_action(&job->common, job->common.bs,
                                      job->on_source_error, true, error);
    } else {
        return block_job_error_action(&job->common, job->target,
                                      job->on_target_error, false, error);
    }
}

typedef struct {
    int ret;
} BackupCompleteData;

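/* Runs in the main loop after the job coroutine finishes; scheduled by
 * block_job_defer_to_main_loop() at the end of backup_run(). */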
static void backup_complete(BlockJob *job, void *opaque)
{
    BackupBlockJob *s = container_of(job, BackupBlockJob, common);
    BackupCompleteData *data = opaque;

    bdrv_unref(s->target);

    block_job_completed(job, data->ret);
    g_free(data);
}

static bool coroutine_fn yield_and_check(BackupBlockJob *job)
{
    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    /* We need to yield so that bdrv_drain_all() returns.
     * (without this, the VM does not reboot)
     */
    if (job->common.speed) {
        uint64_t delay_ns = ratelimit_calculate_delay(&job->limit,
                                                      job->sectors_read);
        job->sectors_read = 0;
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, delay_ns);
    } else {
        block_job_sleep_ns(&job->common, QEMU_CLOCK_REALTIME, 0);
    }

    if (block_job_is_cancelled(&job->common)) {
        return true;
    }

    return false;
}

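/* Walk the dirty bitmap and copy only the clusters it marks.  Progress is
 * faked for the clean ranges that are skipped, so the job's offset still
 * advances monotonically towards common.len. */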
static int coroutine_fn backup_run_incremental(BackupBlockJob *job)
{
    bool error_is_read;
    int ret = 0;
    int clusters_per_iter;
    uint32_t granularity;
    int64_t sector;
    int64_t cluster;
    int64_t end;
    int64_t last_cluster = -1;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    BlockDriverState *bs = job->common.bs;
    HBitmapIter hbi;

    granularity = bdrv_dirty_bitmap_granularity(job->sync_bitmap);
    clusters_per_iter = MAX((granularity / job->cluster_size), 1);
    bdrv_dirty_iter_init(job->sync_bitmap, &hbi);

    /* Find the next dirty sector(s) */
    while ((sector = hbitmap_iter_next(&hbi)) != -1) {
        cluster = sector / sectors_per_cluster;

        /* Fake progress updates for any clusters we skipped */
        if (cluster != last_cluster + 1) {
            job->common.offset += ((cluster - last_cluster - 1) *
                                   job->cluster_size);
        }

        for (end = cluster + clusters_per_iter; cluster < end; cluster++) {
            do {
                if (yield_and_check(job)) {
                    return ret;
                }
                ret = backup_do_cow(bs, cluster * sectors_per_cluster,
                                    sectors_per_cluster, &error_is_read,
                                    false);
                if ((ret < 0) &&
                    backup_error_action(job, error_is_read, -ret) ==
                    BLOCK_ERROR_ACTION_REPORT) {
                    return ret;
                }
            } while (ret < 0);
        }

        /* If the bitmap granularity is smaller than the backup granularity,
         * we need to advance the iterator pointer to the next cluster. */
        if (granularity < job->cluster_size) {
            bdrv_set_dirty_iter(&hbi, cluster * sectors_per_cluster);
        }

        last_cluster = cluster - 1;
    }

    /* Play some final catchup with the progress meter */
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);
    if (last_cluster + 1 < end) {
        job->common.offset += ((end - last_cluster - 1) * job->cluster_size);
    }

    return ret;
}

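/* The job coroutine: install the before-write notifier, then copy according
 * to the sync mode (none: copy-before-write only; incremental: dirty
 * clusters; top/full: sequential scan), and finally drain pending CoW
 * requests via the flush_rwlock before completing. */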
static void coroutine_fn backup_run(void *opaque)
{
    BackupBlockJob *job = opaque;
    BackupCompleteData *data;
    BlockDriverState *bs = job->common.bs;
    BlockDriverState *target = job->target;
    BlockdevOnError on_target_error = job->on_target_error;
    NotifierWithReturn before_write = {
        .notify = backup_before_write_notify,
    };
    int64_t start, end;
    int64_t sectors_per_cluster = cluster_size_sectors(job);
    int ret = 0;

    QLIST_INIT(&job->inflight_reqs);
    qemu_co_rwlock_init(&job->flush_rwlock);

    start = 0;
    end = DIV_ROUND_UP(job->common.len, job->cluster_size);

    job->done_bitmap = bitmap_new(end);

    if (target->blk) {
        blk_set_on_error(target->blk, on_target_error, on_target_error);
        blk_iostatus_enable(target->blk);
    }

    bdrv_add_before_write_notifier(bs, &before_write);

    if (job->sync_mode == MIRROR_SYNC_MODE_NONE) {
        while (!block_job_is_cancelled(&job->common)) {
            /* Yield until the job is cancelled.  We just let our before_write
             * notify callback service CoW requests. */
            job->common.busy = false;
            qemu_coroutine_yield();
            job->common.busy = true;
        }
    } else if (job->sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        ret = backup_run_incremental(job);
    } else {
        /* Both FULL and TOP sync modes require copying. */
        for (; start < end; start++) {
            bool error_is_read;
            if (yield_and_check(job)) {
                break;
            }

            if (job->sync_mode == MIRROR_SYNC_MODE_TOP) {
                int i, n;
                int alloced = 0;

                /* Check to see if these blocks are already in the
                 * backing file. */

                for (i = 0; i < sectors_per_cluster;) {
                    /* bdrv_is_allocated() only returns true/false based
                     * on the first set of sectors it comes across that
                     * are all in the same state.
                     * For that reason we must verify each sector in the
                     * backup cluster length.  We end up copying more than
                     * needed but at some point that is always the case. */
                    alloced =
                        bdrv_is_allocated(bs,
                                start * sectors_per_cluster + i,
                                sectors_per_cluster - i, &n);
                    i += n;

                    if (alloced == 1 || n == 0) {
                        break;
                    }
                }

                /* If the above loop never found any sectors that are in
                 * the topmost image, skip this backup. */
                if (alloced == 0) {
                    continue;
                }
            }
            /* In FULL sync mode we copy the whole drive. */
            ret = backup_do_cow(bs, start * sectors_per_cluster,
                                sectors_per_cluster, &error_is_read, false);
            if (ret < 0) {
                /* Depending on error action, fail now or retry cluster */
                BlockErrorAction action =
                    backup_error_action(job, error_is_read, -ret);
                if (action == BLOCK_ERROR_ACTION_REPORT) {
                    break;
                } else {
                    start--;
                    continue;
                }
            }
        }
    }

    notifier_with_return_remove(&before_write);

    /* wait until pending backup_do_cow() calls have completed */
    qemu_co_rwlock_wrlock(&job->flush_rwlock);
    qemu_co_rwlock_unlock(&job->flush_rwlock);
    g_free(job->done_bitmap);

    if (target->blk) {
        blk_iostatus_disable(target->blk);
    }
    bdrv_op_unblock_all(target, job->common.blocker);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&job->common, backup_complete, data);
}

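/* Create and start a backup job copying 'bs' to 'target'.  Errors are
 * reported through 'errp'; on failure no job is created and, for
 * incremental mode, the frozen sync_bitmap is reclaimed again. */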
void backup_start(BlockDriverState *bs, BlockDriverState *target,
                  int64_t speed, MirrorSyncMode sync_mode,
                  BdrvDirtyBitmap *sync_bitmap,
                  BlockdevOnError on_source_error,
                  BlockdevOnError on_target_error,
                  BlockCompletionFunc *cb, void *opaque,
                  BlockJobTxn *txn, Error **errp)
{
    int64_t len;
    BlockDriverInfo bdi;
    int ret;

    assert(bs);
    assert(target);
    assert(cb);

    if (bs == target) {
        error_setg(errp, "Source and target cannot be the same");
        return;
    }

    if ((on_source_error == BLOCKDEV_ON_ERROR_STOP ||
         on_source_error == BLOCKDEV_ON_ERROR_ENOSPC) &&
        (!bs->blk || !blk_iostatus_is_enabled(bs->blk))) {
        error_setg(errp, QERR_INVALID_PARAMETER, "on-source-error");
        return;
    }

    if (!bdrv_is_inserted(bs)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(bs));
        return;
    }

    if (!bdrv_is_inserted(target)) {
        error_setg(errp, "Device is not inserted: %s",
                   bdrv_get_device_name(target));
        return;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_BACKUP_SOURCE, errp)) {
        return;
    }

    if (bdrv_op_is_blocked(target, BLOCK_OP_TYPE_BACKUP_TARGET, errp)) {
        return;
    }

    if (sync_mode == MIRROR_SYNC_MODE_INCREMENTAL) {
        if (!sync_bitmap) {
            error_setg(errp, "must provide a valid bitmap name for "
                             "\"incremental\" sync mode");
            return;
        }

        /* Create a new bitmap, and freeze/disable this one. */
        if (bdrv_dirty_bitmap_create_successor(bs, sync_bitmap, errp) < 0) {
            return;
        }
    } else if (sync_bitmap) {
        error_setg(errp,
                   "a sync_bitmap was provided to backup_run, "
                   "but received an incompatible sync_mode (%s)",
                   MirrorSyncMode_lookup[sync_mode]);
        return;
    }

    len = bdrv_getlength(bs);
    if (len < 0) {
        error_setg_errno(errp, -len, "unable to get length for '%s'",
                         bdrv_get_device_name(bs));
        goto error;
    }

    BackupBlockJob *job = block_job_create(&backup_job_driver, bs, speed,
                                           cb, opaque, errp);
    if (!job) {
        goto error;
    }

    job->on_source_error = on_source_error;
    job->on_target_error = on_target_error;
    job->target = target;
    job->sync_mode = sync_mode;
    job->sync_bitmap = sync_mode == MIRROR_SYNC_MODE_INCREMENTAL ?
                       sync_bitmap : NULL;

    /* If there is no backing file on the target, we cannot rely on COW if our
     * backup cluster size is smaller than the target cluster size. Even for
     * targets with a backing file, try to avoid COW if possible. */
    ret = bdrv_get_info(job->target, &bdi);
    if (ret < 0 && !target->backing) {
        error_setg_errno(errp, -ret,
            "Couldn't determine the cluster size of the target image, "
            "which has no backing file");
        error_append_hint(errp,
            "Aborting, since this may create an unusable destination image\n");
        goto error;
    } else if (ret < 0 && target->backing) {
        /* Not fatal; just trudge on ahead. */
        job->cluster_size = BACKUP_CLUSTER_SIZE_DEFAULT;
    } else {
        job->cluster_size = MAX(BACKUP_CLUSTER_SIZE_DEFAULT, bdi.cluster_size);
    }

    bdrv_op_block_all(target, job->common.blocker);
    job->common.len = len;
    job->common.co = qemu_coroutine_create(backup_run);
    block_job_txn_add_job(txn, &job->common);
    qemu_coroutine_enter(job->common.co, job);
    return;

 error:
    if (sync_bitmap) {
        bdrv_reclaim_dirty_bitmap(bs, sync_bitmap, NULL);
    }
}