qemu/block/commit.c
/*
 * Live block commit
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Jeff Cody   <jcody@redhat.com>
 *  Based on stream.c by Stefan Hajnoczi
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "block/block_int.h"
#include "block/blockjob_int.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "sysemu/block-backend.h"

enum {
    /*
     * Size of data buffer for populating the image file.  This should be large
     * enough to process multiple clusters in a single call, so that populating
     * contiguous regions of the image is efficient.
     */
    COMMIT_BUFFER_SIZE = 512 * 1024, /* in bytes */
};

#define SLICE_TIME 100000000ULL /* ns */

typedef struct CommitBlockJob {
    BlockJob common;
    RateLimit limit;
    BlockDriverState *commit_top_bs;
    BlockBackend *top;
    BlockBackend *base;
    BlockdevOnError on_error;
    int base_flags;
    char *backing_file_str;
} CommitBlockJob;

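/* Copy one chunk of data from the top image into the base image: read 'bytes'
 * bytes starting at 'offset' from 'bs' into the caller-provided bounce buffer
 * 'buf' and write them to 'base' at the same offset. */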
static int coroutine_fn commit_populate(BlockBackend *bs, BlockBackend *base,
                                        int64_t offset, uint64_t bytes,
                                        void *buf)
{
    int ret = 0;
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base = buf,
        .iov_len = bytes,
    };

    assert(bytes < SIZE_MAX);
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = blk_co_preadv(bs, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        return ret;
    }

    ret = blk_co_pwritev(base, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        return ret;
    }

    return 0;
}

typedef struct {
    int ret;
} CommitCompleteData;

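/* Runs in the main loop after commit_run() has finished: drops the
 * intermediate images on success, restores the original open flags of the
 * base image, removes the commit_top filter node and completes the job. */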
static void commit_complete(BlockJob *job, void *opaque)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common);
    CommitCompleteData *data = opaque;
    BlockDriverState *top = blk_bs(s->top);
    BlockDriverState *base = blk_bs(s->base);
    BlockDriverState *commit_top_bs = s->commit_top_bs;
    int ret = data->ret;
    bool remove_commit_top_bs = false;

    /* Make sure commit_top_bs and top stay around until bdrv_replace_node() */
    bdrv_ref(top);
    bdrv_ref(commit_top_bs);

    /* Remove base node parent that still uses BLK_PERM_WRITE/RESIZE before
     * the normal backing chain can be restored. */
    blk_unref(s->base);

    if (!block_job_is_cancelled(&s->common) && ret == 0) {
        /* success */
        ret = bdrv_drop_intermediate(s->commit_top_bs, base,
                                     s->backing_file_str);
    } else {
        /* XXX Can (or should) we somehow keep 'consistent read' blocked even
         * after the failed/cancelled commit job is gone? If we already wrote
         * something to base, the intermediate images aren't valid any more. */
        remove_commit_top_bs = true;
    }

    /* restore base open flags here if appropriate (e.g., change the base back
     * to r/o). These reopens do not need to be atomic, since we won't abort
     * even on failure here */
    if (s->base_flags != bdrv_get_flags(base)) {
        bdrv_reopen(base, s->base_flags, NULL);
    }
    g_free(s->backing_file_str);
    blk_unref(s->top);

    /* If there is more than one reference to the job (e.g. if called from
     * block_job_finish_sync()), block_job_completed() won't free it and
     * therefore the blockers on the intermediate nodes remain. This would
     * cause bdrv_set_backing_hd() to fail. */
    block_job_remove_all_bdrv(job);

    block_job_completed(&s->common, ret);
    g_free(data);

    /* If bdrv_drop_intermediate() didn't already do that, remove the commit
     * filter driver from the backing chain. Do this as the final step so that
     * the 'consistent read' permission can be granted.  */
    if (remove_commit_top_bs) {
        bdrv_child_try_set_perm(commit_top_bs->backing, 0, BLK_PERM_ALL,
                                &error_abort);
        bdrv_replace_node(commit_top_bs, backing_bs(commit_top_bs),
                          &error_abort);
    }

    bdrv_unref(commit_top_bs);
    bdrv_unref(top);
}

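/* The main job coroutine: walks the top image in COMMIT_BUFFER_SIZE chunks,
 * copies every chunk that is allocated above the base into the base image
 * while honouring the configured rate limit, and finally defers completion
 * to the main loop via commit_complete(). */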
static void coroutine_fn commit_run(void *opaque)
{
    CommitBlockJob *s = opaque;
    CommitCompleteData *data;
    int64_t offset;
    uint64_t delay_ns = 0;
    int ret = 0;
    int64_t n = 0; /* bytes */
    void *buf = NULL;
    int bytes_written = 0;
    int64_t base_len;

    ret = s->common.len = blk_getlength(s->top);

    if (s->common.len < 0) {
        goto out;
    }

    ret = base_len = blk_getlength(s->base);
    if (base_len < 0) {
        goto out;
    }

    if (base_len < s->common.len) {
        ret = blk_truncate(s->base, s->common.len, PREALLOC_MODE_OFF, NULL);
        if (ret) {
            goto out;
        }
    }

    buf = blk_blockalign(s->top, COMMIT_BUFFER_SIZE);

    for (offset = 0; offset < s->common.len; offset += n) {
        bool copy;

        /* Note that even when no rate limit is applied we need to yield
         * with no pending I/O here so that bdrv_drain_all() returns.
         */
        block_job_sleep_ns(&s->common, delay_ns);
        if (block_job_is_cancelled(&s->common)) {
            break;
        }
        /* Copy if allocated above the base */
        ret = bdrv_is_allocated_above(blk_bs(s->top), blk_bs(s->base),
                                      offset, COMMIT_BUFFER_SIZE, &n);
        copy = (ret == 1);
        trace_commit_one_iteration(s, offset, n, ret);
        if (copy) {
            ret = commit_populate(s->top, s->base, offset, n, buf);
            bytes_written += n;
        }
        if (ret < 0) {
            BlockErrorAction action =
                block_job_error_action(&s->common, false, s->on_error, -ret);
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                goto out;
            } else {
                n = 0;
                continue;
            }
        }
        /* Publish progress */
        s->common.offset += n;

        if (copy && s->common.speed) {
            delay_ns = ratelimit_calculate_delay(&s->limit, n);
        }
    }

    ret = 0;

out:
    qemu_vfree(buf);

    data = g_malloc(sizeof(*data));
    data->ret = ret;
    block_job_defer_to_main_loop(&s->common, commit_complete, data);
}

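/* Update the job's rate limit; 'speed' is in bytes per second. */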
static void commit_set_speed(BlockJob *job, int64_t speed, Error **errp)
{
    CommitBlockJob *s = container_of(job, CommitBlockJob, common);

    if (speed < 0) {
        error_setg(errp, QERR_INVALID_PARAMETER, "speed");
        return;
    }
    ratelimit_set_speed(&s->limit, speed, SLICE_TIME);
}

static const BlockJobDriver commit_job_driver = {
    .instance_size = sizeof(CommitBlockJob),
    .job_type      = BLOCK_JOB_TYPE_COMMIT,
    .set_speed     = commit_set_speed,
    .start         = commit_run,
};

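/* Reads on the commit_top filter node are passed straight through to its
 * backing file. */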
static int coroutine_fn bdrv_commit_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

static void bdrv_commit_top_refresh_filename(BlockDriverState *bs, QDict *opts)
{
    bdrv_refresh_filename(bs->backing->bs);
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_commit_top_close(BlockDriverState *bs)
{
}

static void bdrv_commit_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    *nperm = 0;
    *nshared = BLK_PERM_ALL;
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_commit_top = {
    .format_name                = "commit_top",
    .bdrv_co_preadv             = bdrv_commit_top_preadv,
    .bdrv_co_get_block_status   = bdrv_co_get_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_commit_top_refresh_filename,
    .bdrv_close                 = bdrv_commit_top_close,
    .bdrv_child_perm            = bdrv_commit_top_child_perm,
};

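/* Create and start a commit block job for the chain below 'bs': reopen 'base'
 * read-write if necessary, insert the commit_top filter node above 'top',
 * block the intermediate nodes that will disappear, and kick off
 * commit_run(). */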
void commit_start(const char *job_id, BlockDriverState *bs,
                  BlockDriverState *base, BlockDriverState *top, int64_t speed,
                  BlockdevOnError on_error, const char *backing_file_str,
                  const char *filter_node_name, Error **errp)
{
    CommitBlockJob *s;
    BlockReopenQueue *reopen_queue = NULL;
    int orig_base_flags;
    BlockDriverState *iter;
    BlockDriverState *commit_top_bs = NULL;
    Error *local_err = NULL;
    int ret;

    assert(top != bs);
    if (top == base) {
        error_setg(errp, "Invalid files for merge: top and base are the same");
        return;
    }

    s = block_job_create(job_id, &commit_job_driver, bs, 0, BLK_PERM_ALL,
                         speed, BLOCK_JOB_DEFAULT, NULL, NULL, errp);
    if (!s) {
        return;
    }

    /* convert base to r/w, if necessary */
    orig_base_flags = bdrv_get_flags(base);
    if (!(orig_base_flags & BDRV_O_RDWR)) {
        reopen_queue = bdrv_reopen_queue(reopen_queue, base, NULL,
                                         orig_base_flags | BDRV_O_RDWR);
    }

    if (reopen_queue) {
        bdrv_reopen_multiple(bdrv_get_aio_context(bs), reopen_queue, &local_err);
        if (local_err != NULL) {
            error_propagate(errp, local_err);
            goto fail;
        }
    }

    /* Insert commit_top block node above top, so we can block consistent read
     * on the backing chain below it */
    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, filter_node_name, 0,
                                         errp);
    if (commit_top_bs == NULL) {
        goto fail;
    }
    if (!filter_node_name) {
        commit_top_bs->implicit = true;
    }
    commit_top_bs->total_sectors = top->total_sectors;
    bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(top));

    bdrv_set_backing_hd(commit_top_bs, top, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }
    bdrv_replace_node(top, commit_top_bs, &local_err);
    if (local_err) {
        bdrv_unref(commit_top_bs);
        commit_top_bs = NULL;
        error_propagate(errp, local_err);
        goto fail;
    }

    s->commit_top_bs = commit_top_bs;
    bdrv_unref(commit_top_bs);

    /* Block all nodes between top and base, because they will
     * disappear from the chain after this operation. */
    assert(bdrv_chain_contains(top, base));
    for (iter = top; iter != base; iter = backing_bs(iter)) {
        /* XXX BLK_PERM_WRITE needs to be allowed so we don't block ourselves
         * at s->base (if writes are blocked for a node, they are also blocked
         * for its backing file). The other option would be a second filter
         * driver above s->base. */
        ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
                                 BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
                                 errp);
        if (ret < 0) {
            goto fail;
        }
    }

    ret = block_job_add_bdrv(&s->common, "base", base, 0, BLK_PERM_ALL, errp);
    if (ret < 0) {
        goto fail;
    }

    s->base = blk_new(BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_WRITE
                      | BLK_PERM_RESIZE,
                      BLK_PERM_CONSISTENT_READ
                      | BLK_PERM_GRAPH_MOD
                      | BLK_PERM_WRITE_UNCHANGED);
    ret = blk_insert_bs(s->base, base, errp);
    if (ret < 0) {
        goto fail;
    }

    /* Required permissions are already taken with block_job_add_bdrv() */
    s->top = blk_new(0, BLK_PERM_ALL);
    ret = blk_insert_bs(s->top, top, errp);
    if (ret < 0) {
        goto fail;
    }

    s->base_flags = orig_base_flags;
    s->backing_file_str = g_strdup(backing_file_str);
    s->on_error = on_error;

    trace_commit_start(bs, base, top, s);
    block_job_start(&s->common);
    return;

fail:
    if (s->base) {
        blk_unref(s->base);
    }
    if (s->top) {
        blk_unref(s->top);
    }
    if (commit_top_bs) {
        bdrv_replace_node(commit_top_bs, top, &error_abort);
    }
    block_job_early_fail(&s->common);
}


#define COMMIT_BUF_SIZE (2048 * BDRV_SECTOR_SIZE)

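/*
 * Synchronous, in-place commit used outside the block job infrastructure:
 * unlike commit_start(), this runs to completion in the caller's context and
 * empties the top image afterwards if the driver supports it.
 */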
/* commit COW file into the raw image */
int bdrv_commit(BlockDriverState *bs)
{
    BlockBackend *src, *backing;
    BlockDriverState *backing_file_bs = NULL;
    BlockDriverState *commit_top_bs = NULL;
    BlockDriver *drv = bs->drv;
    int64_t offset, length, backing_length;
    int ro, open_flags;
    int64_t n;
    int ret = 0;
    uint8_t *buf = NULL;
    Error *local_err = NULL;

    if (!drv) {
        return -ENOMEDIUM;
    }

    if (!bs->backing) {
        return -ENOTSUP;
    }

    if (bdrv_op_is_blocked(bs, BLOCK_OP_TYPE_COMMIT_SOURCE, NULL) ||
        bdrv_op_is_blocked(bs->backing->bs, BLOCK_OP_TYPE_COMMIT_TARGET, NULL)) {
        return -EBUSY;
    }

    ro = bs->backing->bs->read_only;
    open_flags = bs->backing->bs->open_flags;

    if (ro) {
        if (bdrv_reopen(bs->backing->bs, open_flags | BDRV_O_RDWR, NULL)) {
            return -EACCES;
        }
    }

    src = blk_new(BLK_PERM_CONSISTENT_READ, BLK_PERM_ALL);
    backing = blk_new(BLK_PERM_WRITE | BLK_PERM_RESIZE, BLK_PERM_ALL);

    ret = blk_insert_bs(src, bs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    /* Insert commit_top block node above backing, so we can write to it */
    backing_file_bs = backing_bs(bs);

    commit_top_bs = bdrv_new_open_driver(&bdrv_commit_top, NULL, BDRV_O_RDWR,
                                         &local_err);
    if (commit_top_bs == NULL) {
        error_report_err(local_err);
        goto ro_cleanup;
    }
    bdrv_set_aio_context(commit_top_bs, bdrv_get_aio_context(backing_file_bs));

    bdrv_set_backing_hd(commit_top_bs, backing_file_bs, &error_abort);
    bdrv_set_backing_hd(bs, commit_top_bs, &error_abort);

    ret = blk_insert_bs(backing, backing_file_bs, &local_err);
    if (ret < 0) {
        error_report_err(local_err);
        goto ro_cleanup;
    }

    length = blk_getlength(src);
    if (length < 0) {
        ret = length;
        goto ro_cleanup;
    }

    backing_length = blk_getlength(backing);
    if (backing_length < 0) {
        ret = backing_length;
        goto ro_cleanup;
    }

    /* If our top snapshot is larger than the backing file image,
     * grow the backing file image if possible.  If not possible,
     * we must return an error */
    if (length > backing_length) {
        ret = blk_truncate(backing, length, PREALLOC_MODE_OFF, &local_err);
        if (ret < 0) {
            error_report_err(local_err);
            goto ro_cleanup;
        }
    }

    /* blk_try_blockalign() for src will choose an alignment that works for
     * backing as well, so no need to compare the alignment manually. */
    buf = blk_try_blockalign(src, COMMIT_BUF_SIZE);
    if (buf == NULL) {
        ret = -ENOMEM;
        goto ro_cleanup;
    }

    for (offset = 0; offset < length; offset += n) {
        ret = bdrv_is_allocated(bs, offset, COMMIT_BUF_SIZE, &n);
        if (ret < 0) {
            goto ro_cleanup;
        }
        if (ret) {
            ret = blk_pread(src, offset, buf, n);
            if (ret < 0) {
                goto ro_cleanup;
            }

            ret = blk_pwrite(backing, offset, buf, n, 0);
            if (ret < 0) {
                goto ro_cleanup;
            }
        }
    }

    if (drv->bdrv_make_empty) {
        ret = drv->bdrv_make_empty(bs);
        if (ret < 0) {
            goto ro_cleanup;
        }
        blk_flush(src);
    }

    /*
     * Make sure all data we wrote to the backing device is actually
     * stable on disk.
     */
    blk_flush(backing);

    ret = 0;
ro_cleanup:
    qemu_vfree(buf);

    blk_unref(backing);
    if (backing_file_bs) {
        bdrv_set_backing_hd(bs, backing_file_bs, &error_abort);
    }
    bdrv_unref(commit_top_bs);
    blk_unref(src);

    if (ro) {
        /* ignoring error return here */
        bdrv_reopen(bs->backing->bs, open_flags & ~BDRV_O_RDWR, NULL);
    }

    return ret;
}