qemu/block/mirror.c
/*
 * Image mirroring
 *
 * Copyright Red Hat, Inc. 2012
 *
 * Authors:
 *  Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qemu/cutils.h"
#include "qemu/coroutine.h"
#include "qemu/range.h"
#include "trace.h"
#include "block/blockjob_int.h"
#include "block/block_int.h"
#include "sysemu/block-backend.h"
#include "qapi/error.h"
#include "qapi/qmp/qerror.h"
#include "qemu/ratelimit.h"
#include "qemu/bitmap.h"

#define MAX_IN_FLIGHT 16
#define MAX_IO_BYTES (1 << 20) /* 1 MiB */
#define DEFAULT_MIRROR_BUF_SIZE (MAX_IN_FLIGHT * MAX_IO_BYTES)
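/* With the defaults above, the mirroring buffer is 16 * 1 MiB = 16 MiB;
 * callers can request a different size through the buf_size parameter of
 * mirror_start_job() (the QMP buf-size option). */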

/* The mirroring buffer is a list of granularity-sized chunks.
 * Free chunks are organized in a list.
 */
typedef struct MirrorBuffer {
    QSIMPLEQ_ENTRY(MirrorBuffer) next;
} MirrorBuffer;

typedef struct MirrorOp MirrorOp;

typedef struct MirrorBlockJob {
    BlockJob common;
    BlockBackend *target;
    BlockDriverState *mirror_top_bs;
    BlockDriverState *base;

    /* The name of the graph node to replace */
    char *replaces;
    /* The BDS to replace */
    BlockDriverState *to_replace;
    /* Used to block operations on the drive-mirror-replace target */
    Error *replace_blocker;
    bool is_none_mode;
    BlockMirrorBackingMode backing_mode;
    MirrorCopyMode copy_mode;
    BlockdevOnError on_source_error, on_target_error;
    bool synced;
    /* Set when the target is synced (dirty bitmap is clean, nothing
     * in flight) and the job is running in active mode */
    bool actively_synced;
    bool should_complete;
    int64_t granularity;
    size_t buf_size;
    int64_t bdev_length;
    unsigned long *cow_bitmap;
    BdrvDirtyBitmap *dirty_bitmap;
    BdrvDirtyBitmapIter *dbi;
    uint8_t *buf;
    QSIMPLEQ_HEAD(, MirrorBuffer) buf_free;
    int buf_free_count;

    uint64_t last_pause_ns;
    unsigned long *in_flight_bitmap;
    int in_flight;
    int64_t bytes_in_flight;
    QTAILQ_HEAD(, MirrorOp) ops_in_flight;
    int ret;
    bool unmap;
    int target_cluster_size;
    int max_iov;
    bool initial_zeroing_ongoing;
    int in_active_write_counter;
    bool prepared;
    bool in_drain;
} MirrorBlockJob;

typedef struct MirrorBDSOpaque {
    MirrorBlockJob *job;
    bool stop;
} MirrorBDSOpaque;

struct MirrorOp {
    MirrorBlockJob *s;
    QEMUIOVector qiov;
    int64_t offset;
    uint64_t bytes;

    /* The pointee is set by mirror_co_read(), mirror_co_zero(), and
     * mirror_co_discard() before yielding for the first time */
    int64_t *bytes_handled;

    bool is_pseudo_op;
    bool is_active_write;
    CoQueue waiting_requests;

    QTAILQ_ENTRY(MirrorOp) next;
};

typedef enum MirrorMethod {
    MIRROR_METHOD_COPY,
    MIRROR_METHOD_ZERO,
    MIRROR_METHOD_DISCARD,
} MirrorMethod;

static BlockErrorAction mirror_error_action(MirrorBlockJob *s, bool read,
                                            int error)
{
    s->synced = false;
    s->actively_synced = false;
    if (read) {
        return block_job_error_action(&s->common, s->on_source_error,
                                      true, error);
    } else {
        return block_job_error_action(&s->common, s->on_target_error,
                                      false, error);
    }
}

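/* Wait until no in-flight operation conflicts with [offset, offset + bytes).
 * Conflicts are detected at granularity-chunk resolution, so two byte
 * ranges that merely share a chunk are treated as overlapping.  @self may
 * be NULL, e.g. when mirror_iteration() has not yet created the pseudo
 * operation that will cover the range. */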
static void coroutine_fn mirror_wait_on_conflicts(MirrorOp *self,
                                                  MirrorBlockJob *s,
                                                  uint64_t offset,
                                                  uint64_t bytes)
{
    uint64_t self_start_chunk = offset / s->granularity;
    uint64_t self_end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);
    uint64_t self_nb_chunks = self_end_chunk - self_start_chunk;

    while (find_next_bit(s->in_flight_bitmap, self_end_chunk,
                         self_start_chunk) < self_end_chunk &&
           s->ret >= 0)
    {
        MirrorOp *op;

        QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
            uint64_t op_start_chunk = op->offset / s->granularity;
            uint64_t op_nb_chunks = DIV_ROUND_UP(op->offset + op->bytes,
                                                 s->granularity) -
                                    op_start_chunk;

            if (op == self) {
                continue;
            }

            if (ranges_overlap(self_start_chunk, self_nb_chunks,
                               op_start_chunk, op_nb_chunks))
            {
                qemu_co_queue_wait(&op->waiting_requests, NULL);
                break;
            }
        }
    }
}

static void coroutine_fn mirror_iteration_done(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;
    struct iovec *iov;
    int64_t chunk_num;
    int i, nb_chunks;

    trace_mirror_iteration_done(s, op->offset, op->bytes, ret);

    s->in_flight--;
    s->bytes_in_flight -= op->bytes;
    iov = op->qiov.iov;
    for (i = 0; i < op->qiov.niov; i++) {
        MirrorBuffer *buf = (MirrorBuffer *) iov[i].iov_base;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, buf, next);
        s->buf_free_count++;
    }

    chunk_num = op->offset / s->granularity;
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    bitmap_clear(s->in_flight_bitmap, chunk_num, nb_chunks);
    QTAILQ_REMOVE(&s->ops_in_flight, op, next);
    if (ret >= 0) {
        if (s->cow_bitmap) {
            bitmap_set(s->cow_bitmap, chunk_num, nb_chunks);
        }
        if (!s->initial_zeroing_ongoing) {
            job_progress_update(&s->common.job, op->bytes);
        }
    }
    qemu_iovec_destroy(&op->qiov);

    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static void coroutine_fn mirror_write_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, false, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }
    }

    mirror_iteration_done(op, ret);
}

static void coroutine_fn mirror_read_complete(MirrorOp *op, int ret)
{
    MirrorBlockJob *s = op->s;

    if (ret < 0) {
        BlockErrorAction action;

        bdrv_set_dirty_bitmap(s->dirty_bitmap, op->offset, op->bytes);
        action = mirror_error_action(s, true, -ret);
        if (action == BLOCK_ERROR_ACTION_REPORT && s->ret >= 0) {
            s->ret = ret;
        }

        mirror_iteration_done(op, ret);
        return;
    }

    ret = blk_co_pwritev(s->target, op->offset, op->qiov.size, &op->qiov, 0);
    mirror_write_complete(op, ret);
}

/* Clip bytes relative to offset to not exceed end-of-file */
static inline int64_t mirror_clip_bytes(MirrorBlockJob *s,
                                        int64_t offset,
                                        int64_t bytes)
{
    return MIN(bytes, s->bdev_length - offset);
}

/* Round offset and/or bytes to target cluster if COW is needed, and
 * return the offset of the adjusted tail against original. */
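/* For example (hypothetical numbers): with 64 KiB granularity and a
 * 128 KiB target cluster, a request of *offset = 0, *bytes = 64 KiB that
 * still needs COW is widened to *offset = 0, *bytes = 128 KiB, and the
 * return value is 128 KiB - 64 KiB = 64 KiB: the extra tail bytes the
 * caller must count as handled (assuming neither the max_iov nor the
 * end-of-file clamp applies). */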
static int mirror_cow_align(MirrorBlockJob *s, int64_t *offset,
                            uint64_t *bytes)
{
    bool need_cow;
    int ret = 0;
    int64_t align_offset = *offset;
    int64_t align_bytes = *bytes;
    int max_bytes = s->granularity * s->max_iov;

    need_cow = !test_bit(*offset / s->granularity, s->cow_bitmap);
    need_cow |= !test_bit((*offset + *bytes - 1) / s->granularity,
                          s->cow_bitmap);
    if (need_cow) {
        bdrv_round_to_clusters(blk_bs(s->target), *offset, *bytes,
                               &align_offset, &align_bytes);
    }

    if (align_bytes > max_bytes) {
        align_bytes = max_bytes;
        if (need_cow) {
            align_bytes = QEMU_ALIGN_DOWN(align_bytes, s->target_cluster_size);
        }
    }
    /* Clipping may result in align_bytes unaligned to chunk boundary, but
     * that doesn't matter because it's already the end of source image. */
    align_bytes = mirror_clip_bytes(s, align_offset, align_bytes);

    ret = align_offset + align_bytes - (*offset + *bytes);
    *offset = align_offset;
    *bytes = align_bytes;
    assert(ret >= 0);
    return ret;
}

static inline void coroutine_fn
mirror_wait_for_any_operation(MirrorBlockJob *s, bool active)
{
    MirrorOp *op;

    QTAILQ_FOREACH(op, &s->ops_in_flight, next) {
        /* Do not wait on pseudo ops, because it may in turn wait on
         * some other operation to start, which may in fact be the
         * caller of this function.  Since there is only one pseudo op
         * at any given time, we will always find some real operation
         * to wait on. */
        if (!op->is_pseudo_op && op->is_active_write == active) {
            qemu_co_queue_wait(&op->waiting_requests, NULL);
            return;
        }
    }
    abort();
}

static inline void coroutine_fn
mirror_wait_for_free_in_flight_slot(MirrorBlockJob *s)
{
    /* Only non-active operations use up in-flight slots */
    mirror_wait_for_any_operation(s, false);
}

/* Perform a mirror copy operation.
 *
 * *op->bytes_handled is set to the number of bytes copied after and
 * including offset, excluding any bytes copied prior to offset due
 * to alignment.  This will be op->bytes if no alignment is necessary,
 * or (new_end - op->offset) if the tail is rounded up or down due to
 * alignment or buffer limit.
 */
static void coroutine_fn mirror_co_read(void *opaque)
{
    MirrorOp *op = opaque;
    MirrorBlockJob *s = op->s;
    int nb_chunks;
    uint64_t ret;
    uint64_t max_bytes;

    max_bytes = s->granularity * s->max_iov;

    /* We can only handle as much as buf_size at a time. */
    op->bytes = MIN(s->buf_size, MIN(max_bytes, op->bytes));
    assert(op->bytes);
    assert(op->bytes < BDRV_REQUEST_MAX_BYTES);
    *op->bytes_handled = op->bytes;

    if (s->cow_bitmap) {
        *op->bytes_handled += mirror_cow_align(s, &op->offset, &op->bytes);
    }
    /* Cannot exceed BDRV_REQUEST_MAX_BYTES + INT_MAX */
    assert(*op->bytes_handled <= UINT_MAX);
    assert(op->bytes <= s->buf_size);
    /* The offset is granularity-aligned because:
     * 1) Caller passes in aligned values;
     * 2) mirror_cow_align is used only when target cluster is larger. */
    assert(QEMU_IS_ALIGNED(op->offset, s->granularity));
    /* The range is sector-aligned, since bdrv_getlength() rounds up. */
    assert(QEMU_IS_ALIGNED(op->bytes, BDRV_SECTOR_SIZE));
    nb_chunks = DIV_ROUND_UP(op->bytes, s->granularity);

    while (s->buf_free_count < nb_chunks) {
        trace_mirror_yield_in_flight(s, op->offset, s->in_flight);
        mirror_wait_for_free_in_flight_slot(s);
    }

    /* Now make a QEMUIOVector taking enough granularity-sized chunks
     * from s->buf_free.
     */
    qemu_iovec_init(&op->qiov, nb_chunks);
    while (nb_chunks-- > 0) {
        MirrorBuffer *buf = QSIMPLEQ_FIRST(&s->buf_free);
        size_t remaining = op->bytes - op->qiov.size;

        QSIMPLEQ_REMOVE_HEAD(&s->buf_free, next);
        s->buf_free_count--;
        qemu_iovec_add(&op->qiov, buf, MIN(s->granularity, remaining));
    }

    /* Copy the dirty cluster.  */
    s->in_flight++;
    s->bytes_in_flight += op->bytes;
    trace_mirror_one_iteration(s, op->offset, op->bytes);

    ret = bdrv_co_preadv(s->mirror_top_bs->backing, op->offset, op->bytes,
                         &op->qiov, 0);
    mirror_read_complete(op, ret);
}

static void coroutine_fn mirror_co_zero(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pwrite_zeroes(op->s->target, op->offset, op->bytes,
                               op->s->unmap ? BDRV_REQ_MAY_UNMAP : 0);
    mirror_write_complete(op, ret);
}

static void coroutine_fn mirror_co_discard(void *opaque)
{
    MirrorOp *op = opaque;
    int ret;

    op->s->in_flight++;
    op->s->bytes_in_flight += op->bytes;
    *op->bytes_handled = op->bytes;

    ret = blk_co_pdiscard(op->s->target, op->offset, op->bytes);
    mirror_write_complete(op, ret);
}

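/* Spawn a coroutine that performs @mirror_method on the given range and
 * return the number of bytes it accounted for.  Although the I/O itself
 * completes asynchronously, this function can return synchronously
 * because every mirror_co_*() coroutine stores its (possibly adjusted)
 * byte count in *op->bytes_handled before yielding for the first time.
 * Zero and discard operations never take chunks from s->buf_free; their
 * op->qiov stays zero-initialized, so mirror_iteration_done() returns
 * nothing to the pool for them. */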
static unsigned mirror_perform(MirrorBlockJob *s, int64_t offset,
                               unsigned bytes, MirrorMethod mirror_method)
{
    MirrorOp *op;
    Coroutine *co;
    int64_t bytes_handled = -1;

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s              = s,
        .offset         = offset,
        .bytes          = bytes,
        .bytes_handled  = &bytes_handled,
    };
    qemu_co_queue_init(&op->waiting_requests);

    switch (mirror_method) {
    case MIRROR_METHOD_COPY:
        co = qemu_coroutine_create(mirror_co_read, op);
        break;
    case MIRROR_METHOD_ZERO:
        co = qemu_coroutine_create(mirror_co_zero, op);
        break;
    case MIRROR_METHOD_DISCARD:
        co = qemu_coroutine_create(mirror_co_discard, op);
        break;
    default:
        abort();
    }

    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);
    qemu_coroutine_enter(co);
    /* At this point, ownership of op has been moved to the coroutine
     * and the object may already be freed */

    /* Assert that this value has been set */
    assert(bytes_handled >= 0);

    /* Same assertion as in mirror_co_read() (and for mirror_co_zero()
     * and mirror_co_discard(), bytes_handled == op->bytes, which
     * is the @bytes parameter given to this function) */
    assert(bytes_handled <= UINT_MAX);
    return bytes_handled;
}

static uint64_t coroutine_fn mirror_iteration(MirrorBlockJob *s)
{
    BlockDriverState *source = s->mirror_top_bs->backing->bs;
    MirrorOp *pseudo_op;
    int64_t offset;
    uint64_t delay_ns = 0, ret = 0;
    /* At least the first dirty chunk is mirrored in one iteration. */
    int nb_chunks = 1;
    bool write_zeroes_ok = bdrv_can_write_zeroes_with_unmap(blk_bs(s->target));
    int max_io_bytes = MAX(s->buf_size / MAX_IN_FLIGHT, MAX_IO_BYTES);

    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    offset = bdrv_dirty_iter_next(s->dbi);
    if (offset < 0) {
        bdrv_set_dirty_iter(s->dbi, 0);
        offset = bdrv_dirty_iter_next(s->dbi);
        trace_mirror_restart_iter(s, bdrv_get_dirty_count(s->dirty_bitmap));
        assert(offset >= 0);
    }
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    mirror_wait_on_conflicts(NULL, s, offset, 1);

    job_pause_point(&s->common.job);

    /* Find the number of consecutive dirty chunks following the first dirty
     * one, and wait for in-flight requests in them. */
    bdrv_dirty_bitmap_lock(s->dirty_bitmap);
    while (nb_chunks * s->granularity < s->buf_size) {
        int64_t next_dirty;
        int64_t next_offset = offset + nb_chunks * s->granularity;
        int64_t next_chunk = next_offset / s->granularity;
        if (next_offset >= s->bdev_length ||
            !bdrv_get_dirty_locked(source, s->dirty_bitmap, next_offset)) {
            break;
        }
        if (test_bit(next_chunk, s->in_flight_bitmap)) {
            break;
        }

        next_dirty = bdrv_dirty_iter_next(s->dbi);
        if (next_dirty > next_offset || next_dirty < 0) {
            /* The bitmap iterator's cache is stale, refresh it */
            bdrv_set_dirty_iter(s->dbi, next_offset);
            next_dirty = bdrv_dirty_iter_next(s->dbi);
        }
        assert(next_dirty == next_offset);
        nb_chunks++;
    }

    /* Clear dirty bits before querying the block status, because
     * calling bdrv_block_status_above could yield - if some blocks are
     * marked dirty in this window, we need to know.
     */
    bdrv_reset_dirty_bitmap_locked(s->dirty_bitmap, offset,
                                   nb_chunks * s->granularity);
    bdrv_dirty_bitmap_unlock(s->dirty_bitmap);

    /* Before claiming an area in the in-flight bitmap, we have to
     * create a MirrorOp for it so that conflicting requests can wait
     * for it.  mirror_perform() will create the real MirrorOps later,
     * for now we just create a pseudo operation that will wake up all
     * conflicting requests once all real operations have been
     * launched. */
    pseudo_op = g_new(MirrorOp, 1);
    *pseudo_op = (MirrorOp){
        .offset         = offset,
        .bytes          = nb_chunks * s->granularity,
        .is_pseudo_op   = true,
    };
    qemu_co_queue_init(&pseudo_op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, pseudo_op, next);

    bitmap_set(s->in_flight_bitmap, offset / s->granularity, nb_chunks);
    while (nb_chunks > 0 && offset < s->bdev_length) {
        int ret;
        int64_t io_bytes;
        int64_t io_bytes_acct;
        MirrorMethod mirror_method = MIRROR_METHOD_COPY;

        assert(!(offset % s->granularity));
        ret = bdrv_block_status_above(source, NULL, offset,
                                      nb_chunks * s->granularity,
                                      &io_bytes, NULL, NULL);
        if (ret < 0) {
            io_bytes = MIN(nb_chunks * s->granularity, max_io_bytes);
        } else if (ret & BDRV_BLOCK_DATA) {
            io_bytes = MIN(io_bytes, max_io_bytes);
        }

        io_bytes -= io_bytes % s->granularity;
        if (io_bytes < s->granularity) {
            io_bytes = s->granularity;
        } else if (ret >= 0 && !(ret & BDRV_BLOCK_DATA)) {
            int64_t target_offset;
            int64_t target_bytes;
            bdrv_round_to_clusters(blk_bs(s->target), offset, io_bytes,
                                   &target_offset, &target_bytes);
            if (target_offset == offset &&
                target_bytes == io_bytes) {
                mirror_method = ret & BDRV_BLOCK_ZERO ?
                                    MIRROR_METHOD_ZERO :
                                    MIRROR_METHOD_DISCARD;
            }
        }

        while (s->in_flight >= MAX_IN_FLIGHT) {
            trace_mirror_yield_in_flight(s, offset, s->in_flight);
            mirror_wait_for_free_in_flight_slot(s);
        }

        if (s->ret < 0) {
            ret = 0;
            goto fail;
        }

        io_bytes = mirror_clip_bytes(s, offset, io_bytes);
        io_bytes = mirror_perform(s, offset, io_bytes, mirror_method);
        if (mirror_method != MIRROR_METHOD_COPY && write_zeroes_ok) {
            io_bytes_acct = 0;
        } else {
            io_bytes_acct = io_bytes;
        }
        assert(io_bytes);
        offset += io_bytes;
        nb_chunks -= DIV_ROUND_UP(io_bytes, s->granularity);
        delay_ns = block_job_ratelimit_get_delay(&s->common, io_bytes_acct);
    }

    ret = delay_ns;
fail:
    QTAILQ_REMOVE(&s->ops_in_flight, pseudo_op, next);
    qemu_co_queue_restart_all(&pseudo_op->waiting_requests);
    g_free(pseudo_op);

    return ret;
}

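/* Carve s->buf into granularity-sized chunks and thread them onto the
 * free list.  buf_size is assumed to be a multiple of the granularity
 * (mirror_start_job() is expected to round it up), so the loop comes out
 * even.  Each QSIMPLEQ_ENTRY lives inside its chunk: the link is
 * overwritten by data while the chunk is in flight and is rewritten when
 * mirror_iteration_done() releases the chunk. */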
static void mirror_free_init(MirrorBlockJob *s)
{
    int granularity = s->granularity;
    size_t buf_size = s->buf_size;
    uint8_t *buf = s->buf;

    assert(s->buf_free_count == 0);
    QSIMPLEQ_INIT(&s->buf_free);
    while (buf_size != 0) {
        MirrorBuffer *cur = (MirrorBuffer *)buf;
        QSIMPLEQ_INSERT_TAIL(&s->buf_free, cur, next);
        s->buf_free_count++;
        buf_size -= granularity;
        buf += granularity;
    }
}

/* This is also used for the .pause callback. There is no matching
 * mirror_resume() because mirror_run() will begin iterating again
 * when the job is resumed.
 */
static void coroutine_fn mirror_wait_for_all_io(MirrorBlockJob *s)
{
    while (s->in_flight > 0) {
        mirror_wait_for_free_in_flight_slot(s);
    }
}

/**
 * mirror_exit_common: handle both abort() and prepare() cases.
 * for .prepare, returns 0 on success and -errno on failure.
 * for .abort cases, denoted by abort = true, MUST return 0.
 */
static int mirror_exit_common(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockJob *bjob = &s->common;
    MirrorBDSOpaque *bs_opaque;
    AioContext *replace_aio_context = NULL;
    BlockDriverState *src;
    BlockDriverState *target_bs;
    BlockDriverState *mirror_top_bs;
    Error *local_err = NULL;
    bool abort = job->ret < 0;
    int ret = 0;

    if (s->prepared) {
        return 0;
    }
    s->prepared = true;

    mirror_top_bs = s->mirror_top_bs;
    bs_opaque = mirror_top_bs->opaque;
    src = mirror_top_bs->backing->bs;
    target_bs = blk_bs(s->target);

    if (bdrv_chain_contains(src, target_bs)) {
        bdrv_unfreeze_backing_chain(mirror_top_bs, target_bs);
    }

    bdrv_release_dirty_bitmap(src, s->dirty_bitmap);

    /* Make sure that the source BDS doesn't go away during bdrv_replace_node,
     * before we can call bdrv_drained_end */
    bdrv_ref(src);
    bdrv_ref(mirror_top_bs);
    bdrv_ref(target_bs);

    /* Remove target parent that still uses BLK_PERM_WRITE/RESIZE before
     * inserting target_bs at s->to_replace, where we might not be able to get
     * these permissions.
     *
     * Note that blk_unref() alone doesn't necessarily drop permissions because
     * we might be running nested inside mirror_drain(), which takes an extra
     * reference, so use an explicit blk_set_perm() first. */
    blk_set_perm(s->target, 0, BLK_PERM_ALL, &error_abort);
    blk_unref(s->target);
    s->target = NULL;

    /* We don't access the source any more. Dropping any WRITE/RESIZE is
     * required before it could become a backing file of target_bs. Not having
     * these permissions any more means that we can't allow any new requests on
     * mirror_top_bs from now on, so keep it drained. */
    bdrv_drained_begin(mirror_top_bs);
    bs_opaque->stop = true;
    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
                             &error_abort);
    if (!abort && s->backing_mode == MIRROR_SOURCE_BACKING_CHAIN) {
        BlockDriverState *backing = s->is_none_mode ? src : s->base;
        if (backing_bs(target_bs) != backing) {
            bdrv_set_backing_hd(target_bs, backing, &local_err);
            if (local_err) {
                error_report_err(local_err);
                ret = -EPERM;
            }
        }
    }

    if (s->to_replace) {
        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);
    }

    if (s->should_complete && !abort) {
        BlockDriverState *to_replace = s->to_replace ?: src;
        bool ro = bdrv_is_read_only(to_replace);

        if (ro != bdrv_is_read_only(target_bs)) {
            bdrv_reopen_set_read_only(target_bs, ro, NULL);
        }

        /* The mirror job has no requests in flight any more, but we need to
         * drain potential other users of the BDS before changing the graph. */
        assert(s->in_drain);
        bdrv_drained_begin(target_bs);
        bdrv_replace_node(to_replace, target_bs, &local_err);
        bdrv_drained_end(target_bs);
        if (local_err) {
            error_report_err(local_err);
            ret = -EPERM;
        }
    }
    if (s->to_replace) {
        bdrv_op_unblock_all(s->to_replace, s->replace_blocker);
        error_free(s->replace_blocker);
        bdrv_unref(s->to_replace);
    }
    if (replace_aio_context) {
        aio_context_release(replace_aio_context);
    }
    g_free(s->replaces);
    bdrv_unref(target_bs);

    /*
     * Remove the mirror filter driver from the graph. Before this, get rid of
     * the blockers on the intermediate nodes so that the resulting state is
     * valid.
     */
    block_job_remove_all_bdrv(bjob);
    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);

    /* We just changed the BDS the job BB refers to (with either or both of the
     * bdrv_replace_node() calls), so switch the BB back so the cleanup does
     * the right thing. We don't need any permissions any more now. */
    blk_remove_bs(bjob->blk);
    blk_set_perm(bjob->blk, 0, BLK_PERM_ALL, &error_abort);
    blk_insert_bs(bjob->blk, mirror_top_bs, &error_abort);

    bs_opaque->job = NULL;

    bdrv_drained_end(src);
    bdrv_drained_end(mirror_top_bs);
    s->in_drain = false;
    bdrv_unref(mirror_top_bs);
    bdrv_unref(src);

    return ret;
}

static int mirror_prepare(Job *job)
{
    return mirror_exit_common(job);
}

static void mirror_abort(Job *job)
{
    int ret = mirror_exit_common(job);
    assert(ret == 0);
}

static void coroutine_fn mirror_throttle(MirrorBlockJob *s)
{
    int64_t now = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);

    if (now - s->last_pause_ns > BLOCK_JOB_SLICE_TIME) {
        s->last_pause_ns = now;
        job_sleep_ns(&s->common.job, 0);
    } else {
        job_pause_point(&s->common.job);
    }
}

static int coroutine_fn mirror_dirty_init(MirrorBlockJob *s)
{
    int64_t offset;
    BlockDriverState *base = s->base;
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    int ret;
    int64_t count;

    if (base == NULL && !bdrv_has_zero_init(target_bs)) {
        if (!bdrv_can_write_zeroes_with_unmap(target_bs)) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, 0, s->bdev_length);
            return 0;
        }

        s->initial_zeroing_ongoing = true;
        for (offset = 0; offset < s->bdev_length; ) {
            int bytes = MIN(s->bdev_length - offset,
                            QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

            mirror_throttle(s);

            if (job_is_cancelled(&s->common.job)) {
                s->initial_zeroing_ongoing = false;
                return 0;
            }

            if (s->in_flight >= MAX_IN_FLIGHT) {
                trace_mirror_yield(s, UINT64_MAX, s->buf_free_count,
                                   s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            }

            mirror_perform(s, offset, bytes, MIRROR_METHOD_ZERO);
            offset += bytes;
        }

        mirror_wait_for_all_io(s);
        s->initial_zeroing_ongoing = false;
    }

    /* First part, loop on the sectors and initialize the dirty bitmap.  */
    for (offset = 0; offset < s->bdev_length; ) {
        /* Just to make sure we are not exceeding int limit. */
        int bytes = MIN(s->bdev_length - offset,
                        QEMU_ALIGN_DOWN(INT_MAX, s->granularity));

        mirror_throttle(s);

        if (job_is_cancelled(&s->common.job)) {
            return 0;
        }

        ret = bdrv_is_allocated_above(bs, base, false, offset, bytes, &count);
        if (ret < 0) {
            return ret;
        }

        assert(count);
        if (ret == 1) {
            bdrv_set_dirty_bitmap(s->dirty_bitmap, offset, count);
        }
        offset += count;
    }
    return 0;
}

/* Called when going out of the streaming phase to flush the bulk of the
 * data to the medium, or just before completing.
 */
static int mirror_flush(MirrorBlockJob *s)
{
    int ret = blk_flush(s->target);
    if (ret < 0) {
        if (mirror_error_action(s, false, -ret) == BLOCK_ERROR_ACTION_REPORT) {
            s->ret = ret;
        }
    }
    return ret;
}

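/* The main job coroutine.  Roughly: populate the dirty bitmap (unless
 * running in none mode), loop over mirror_iteration() until the bitmap
 * is clean and nothing is in flight, flush and transition to READY, and
 * from then on keep copying new writes until completion is requested or
 * the job is cancelled. */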
static int coroutine_fn mirror_run(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *bs = s->mirror_top_bs->backing->bs;
    BlockDriverState *target_bs = blk_bs(s->target);
    bool need_drain = true;
    int64_t length;
    BlockDriverInfo bdi;
    char backing_filename[2]; /* we only need 2 characters because we are only
                                 checking for a NULL string */
    int ret = 0;

    if (job_is_cancelled(&s->common.job)) {
        goto immediate_exit;
    }

    s->bdev_length = bdrv_getlength(bs);
    if (s->bdev_length < 0) {
        ret = s->bdev_length;
        goto immediate_exit;
    }

    /* Active commit must resize the base image if its size differs from the
     * active layer. */
    if (s->base == blk_bs(s->target)) {
        int64_t base_length;

        base_length = blk_getlength(s->target);
        if (base_length < 0) {
            ret = base_length;
            goto immediate_exit;
        }

        if (s->bdev_length > base_length) {
            ret = blk_truncate(s->target, s->bdev_length, PREALLOC_MODE_OFF,
                               NULL);
            if (ret < 0) {
                goto immediate_exit;
            }
        }
    }

    if (s->bdev_length == 0) {
        /* Transition to the READY state and wait for complete. */
        job_transition_to_ready(&s->common.job);
        s->synced = true;
        s->actively_synced = true;
        while (!job_is_cancelled(&s->common.job) && !s->should_complete) {
            job_yield(&s->common.job);
        }
        s->common.job.cancelled = false;
        goto immediate_exit;
    }

    length = DIV_ROUND_UP(s->bdev_length, s->granularity);
    s->in_flight_bitmap = bitmap_new(length);

    /* If we have no backing file yet in the destination, we cannot let
     * the destination do COW.  Instead, we copy sectors around the
     * dirty data if needed.  We need a bitmap to do that.
     */
    bdrv_get_backing_filename(target_bs, backing_filename,
                              sizeof(backing_filename));
    if (!bdrv_get_info(target_bs, &bdi) && bdi.cluster_size) {
        s->target_cluster_size = bdi.cluster_size;
    } else {
        s->target_cluster_size = BDRV_SECTOR_SIZE;
    }
    if (backing_filename[0] && !target_bs->backing &&
        s->granularity < s->target_cluster_size) {
        s->buf_size = MAX(s->buf_size, s->target_cluster_size);
        s->cow_bitmap = bitmap_new(length);
    }
    s->max_iov = MIN(bs->bl.max_iov, target_bs->bl.max_iov);

    s->buf = qemu_try_blockalign(bs, s->buf_size);
    if (s->buf == NULL) {
        ret = -ENOMEM;
        goto immediate_exit;
    }

    mirror_free_init(s);

    s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    if (!s->is_none_mode) {
        ret = mirror_dirty_init(s);
        if (ret < 0 || job_is_cancelled(&s->common.job)) {
            goto immediate_exit;
        }
    }

    assert(!s->dbi);
    s->dbi = bdrv_dirty_iter_new(s->dirty_bitmap);
    for (;;) {
        uint64_t delay_ns = 0;
        int64_t cnt, delta;
        bool should_complete;

        /* Do not start passive operations while there are active
         * writes in progress */
        while (s->in_active_write_counter) {
            mirror_wait_for_any_operation(s, true);
        }

        if (s->ret < 0) {
            ret = s->ret;
            goto immediate_exit;
        }

        job_pause_point(&s->common.job);

        cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        /* cnt is the number of dirty bytes remaining and s->bytes_in_flight is
         * the number of bytes currently being processed; together those are
         * the current remaining operation length */
        job_progress_set_remaining(&s->common.job, s->bytes_in_flight + cnt);

        /* Note that even when no rate limit is applied we need to yield
         * periodically with no pending I/O so that bdrv_drain_all() returns.
         * We do so every BLOCK_JOB_SLICE_TIME nanoseconds, or when there is
         * an error, or when the source is clean, whichever comes first. */
        delta = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - s->last_pause_ns;
        if (delta < BLOCK_JOB_SLICE_TIME &&
            s->common.iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
            if (s->in_flight >= MAX_IN_FLIGHT || s->buf_free_count == 0 ||
                (cnt == 0 && s->in_flight > 0)) {
                trace_mirror_yield(s, cnt, s->buf_free_count, s->in_flight);
                mirror_wait_for_free_in_flight_slot(s);
                continue;
            } else if (cnt != 0) {
                delay_ns = mirror_iteration(s);
            }
        }

        should_complete = false;
        if (s->in_flight == 0 && cnt == 0) {
            trace_mirror_before_flush(s);
            if (!s->synced) {
                if (mirror_flush(s) < 0) {
                    /* Go check s->ret.  */
                    continue;
                }
                /* We're out of the streaming phase.  From now on, if the job
                 * is cancelled we will actually complete all pending I/O and
                 * report completion.  This way, block-job-cancel will leave
                 * the target in a consistent state.
                 */
                job_transition_to_ready(&s->common.job);
                s->synced = true;
                if (s->copy_mode != MIRROR_COPY_MODE_BACKGROUND) {
                    s->actively_synced = true;
                }
            }

            should_complete = s->should_complete ||
                job_is_cancelled(&s->common.job);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
        }

        if (cnt == 0 && should_complete) {
            /* The dirty bitmap is not updated while operations are pending.
             * If we're about to exit, wait for pending operations before
             * calling bdrv_get_dirty_count(bs), or we may exit while the
             * source has dirty data to copy!
             *
             * Note that I/O can be submitted by the guest while
             * mirror_populate runs, so pause it now.  Before deciding
             * whether to switch to target check one last time if I/O has
             * come in the meanwhile, and if not flush the data to disk.
             */
            trace_mirror_before_drain(s, cnt);

            s->in_drain = true;
            bdrv_drained_begin(bs);
            cnt = bdrv_get_dirty_count(s->dirty_bitmap);
            if (cnt > 0 || mirror_flush(s) < 0) {
                bdrv_drained_end(bs);
                s->in_drain = false;
                continue;
            }

            /* The two disks are in sync.  Exit and report successful
             * completion.
             */
            assert(QLIST_EMPTY(&bs->tracked_requests));
            s->common.job.cancelled = false;
            need_drain = false;
            break;
        }

        ret = 0;

        if (s->synced && !should_complete) {
            delay_ns = (s->in_flight == 0 &&
                        cnt == 0 ? BLOCK_JOB_SLICE_TIME : 0);
        }
        trace_mirror_before_sleep(s, cnt, s->synced, delay_ns);
        job_sleep_ns(&s->common.job, delay_ns);
        if (job_is_cancelled(&s->common.job) &&
            (!s->synced || s->common.job.force_cancel))
        {
            break;
        }
        s->last_pause_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

immediate_exit:
    if (s->in_flight > 0) {
        /* We get here only if something went wrong.  Either the job failed,
         * or it was cancelled prematurely so that we do not guarantee that
         * the target is a copy of the source.
         */
        assert(ret < 0 || ((s->common.job.force_cancel || !s->synced) &&
               job_is_cancelled(&s->common.job)));
        assert(need_drain);
        mirror_wait_for_all_io(s);
    }

    assert(s->in_flight == 0);
    qemu_vfree(s->buf);
    g_free(s->cow_bitmap);
    g_free(s->in_flight_bitmap);
    bdrv_dirty_iter_free(s->dbi);

    if (need_drain) {
        s->in_drain = true;
        bdrv_drained_begin(bs);
    }

    return ret;
}

static void mirror_complete(Job *job, Error **errp)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);
    BlockDriverState *target;

    target = blk_bs(s->target);

    if (!s->synced) {
        error_setg(errp, "The active block job '%s' cannot be completed",
                   job->id);
        return;
    }

    if (s->backing_mode == MIRROR_OPEN_BACKING_CHAIN) {
        int ret;

        assert(!target->backing);
        ret = bdrv_open_backing_file(target, NULL, "backing", errp);
        if (ret < 0) {
            return;
        }
    }

    /* block all operations on to_replace bs */
    if (s->replaces) {
        AioContext *replace_aio_context;

        s->to_replace = bdrv_find_node(s->replaces);
        if (!s->to_replace) {
            error_setg(errp, "Node name '%s' not found", s->replaces);
            return;
        }

        replace_aio_context = bdrv_get_aio_context(s->to_replace);
        aio_context_acquire(replace_aio_context);

        /* TODO Translate this into permission system. Current definition of
         * GRAPH_MOD would require to request it for the parents; they might
         * not even be BlockDriverStates, however, so a BdrvChild can't address
         * them. May need redefinition of GRAPH_MOD. */
        error_setg(&s->replace_blocker,
                   "block device is in use by block-job-complete");
        bdrv_op_block_all(s->to_replace, s->replace_blocker);
        bdrv_ref(s->to_replace);

        aio_context_release(replace_aio_context);
    }

    s->should_complete = true;
    job_enter(job);
}

static void coroutine_fn mirror_pause(Job *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common.job);

    mirror_wait_for_all_io(s);
}

static bool mirror_drained_poll(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* If the job isn't paused nor cancelled, we can't be sure that it won't
     * issue more requests. We make an exception if we've reached this point
     * from one of our own drain sections, to avoid a deadlock waiting for
     * ourselves.
     */
    if (!s->common.job.paused && !s->common.job.cancelled && !s->in_drain) {
        return true;
    }

    return !!s->in_flight;
}

static void mirror_drain(BlockJob *job)
{
    MirrorBlockJob *s = container_of(job, MirrorBlockJob, common);

    /* Need to keep a reference in case blk_drain triggers execution
     * of mirror_complete...
     */
    if (s->target) {
        BlockBackend *target = s->target;
        blk_ref(target);
        blk_drain(target);
        blk_unref(target);
    }
}

static const BlockJobDriver mirror_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_MIRROR,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .drain                  = mirror_drain,
};

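/* Active commit (committing the active layer into its backing file)
 * reuses the mirror machinery wholesale; only the job type differs. */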
static const BlockJobDriver commit_active_job_driver = {
    .job_driver = {
        .instance_size          = sizeof(MirrorBlockJob),
        .job_type               = JOB_TYPE_COMMIT,
        .free                   = block_job_free,
        .user_resume            = block_job_user_resume,
        .drain                  = block_job_drain,
        .run                    = mirror_run,
        .prepare                = mirror_prepare,
        .abort                  = mirror_abort,
        .pause                  = mirror_pause,
        .complete               = mirror_complete,
    },
    .drained_poll           = mirror_drained_poll,
    .drain                  = mirror_drain,
};

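/* Replicate a guest write (or zero/discard) to the target.  Only the
 * parts of [offset, offset + bytes) that are still marked dirty are
 * written; the affected bits are cleared up front and set again on
 * failure, leaving the range to background copying (or failing the job,
 * depending on the configured error action). */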
static void coroutine_fn
do_sync_target_write(MirrorBlockJob *job, MirrorMethod method,
                     uint64_t offset, uint64_t bytes,
                     QEMUIOVector *qiov, int flags)
{
    QEMUIOVector target_qiov;
    uint64_t dirty_offset = offset;
    uint64_t dirty_bytes;

    if (qiov) {
        qemu_iovec_init(&target_qiov, qiov->niov);
    }

    while (true) {
        bool valid_area;
        int ret;

        bdrv_dirty_bitmap_lock(job->dirty_bitmap);
        dirty_bytes = MIN(offset + bytes - dirty_offset, INT_MAX);
        valid_area = bdrv_dirty_bitmap_next_dirty_area(job->dirty_bitmap,
                                                       &dirty_offset,
                                                       &dirty_bytes);
        if (!valid_area) {
            bdrv_dirty_bitmap_unlock(job->dirty_bitmap);
            break;
        }

        bdrv_reset_dirty_bitmap_locked(job->dirty_bitmap,
                                       dirty_offset, dirty_bytes);
        bdrv_dirty_bitmap_unlock(job->dirty_bitmap);

        job_progress_increase_remaining(&job->common.job, dirty_bytes);

        assert(dirty_offset - offset <= SIZE_MAX);
        if (qiov) {
            qemu_iovec_reset(&target_qiov);
            qemu_iovec_concat(&target_qiov, qiov,
                              dirty_offset - offset, dirty_bytes);
        }

        switch (method) {
        case MIRROR_METHOD_COPY:
            ret = blk_co_pwritev(job->target, dirty_offset, dirty_bytes,
                                 qiov ? &target_qiov : NULL, flags);
            break;

        case MIRROR_METHOD_ZERO:
            assert(!qiov);
            ret = blk_co_pwrite_zeroes(job->target, dirty_offset, dirty_bytes,
                                       flags);
            break;

        case MIRROR_METHOD_DISCARD:
            assert(!qiov);
            ret = blk_co_pdiscard(job->target, dirty_offset, dirty_bytes);
            break;

        default:
            abort();
        }

        if (ret >= 0) {
            job_progress_update(&job->common.job, dirty_bytes);
        } else {
            BlockErrorAction action;

            bdrv_set_dirty_bitmap(job->dirty_bitmap, dirty_offset, dirty_bytes);
            job->actively_synced = false;

            action = mirror_error_action(job, false, -ret);
            if (action == BLOCK_ERROR_ACTION_REPORT) {
                if (!job->ret) {
                    job->ret = ret;
                }
                break;
            }
        }

        dirty_offset += dirty_bytes;
    }

    if (qiov) {
        qemu_iovec_destroy(&target_qiov);
    }
}

static MirrorOp *coroutine_fn active_write_prepare(MirrorBlockJob *s,
                                                   uint64_t offset,
                                                   uint64_t bytes)
{
    MirrorOp *op;
    uint64_t start_chunk = offset / s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(offset + bytes, s->granularity);

    op = g_new(MirrorOp, 1);
    *op = (MirrorOp){
        .s                  = s,
        .offset             = offset,
        .bytes              = bytes,
        .is_active_write    = true,
    };
    qemu_co_queue_init(&op->waiting_requests);
    QTAILQ_INSERT_TAIL(&s->ops_in_flight, op, next);

    s->in_active_write_counter++;

    mirror_wait_on_conflicts(op, s, offset, bytes);

    bitmap_set(s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);

    return op;
}

static void coroutine_fn active_write_settle(MirrorOp *op)
{
    uint64_t start_chunk = op->offset / op->s->granularity;
    uint64_t end_chunk = DIV_ROUND_UP(op->offset + op->bytes,
                                      op->s->granularity);

    if (!--op->s->in_active_write_counter && op->s->actively_synced) {
        BdrvChild *source = op->s->mirror_top_bs->backing;

        if (QLIST_FIRST(&source->bs->parents) == source &&
            QLIST_NEXT(source, next_parent) == NULL)
        {
            /* Assert that we are back in sync once all active write
             * operations are settled.
             * Note that we can only assert this if the mirror node
             * is the source node's only parent. */
            assert(!bdrv_get_dirty_count(op->s->dirty_bitmap));
        }
    }
    bitmap_clear(op->s->in_flight_bitmap, start_chunk, end_chunk - start_chunk);
    QTAILQ_REMOVE(&op->s->ops_in_flight, op, next);
    qemu_co_queue_restart_all(&op->waiting_requests);
    g_free(op);
}

static int coroutine_fn bdrv_mirror_top_preadv(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    return bdrv_co_preadv(bs->backing, offset, bytes, qiov, flags);
}

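/* Forward a guest write to the source first; only if that succeeds (and
 * the job is in write-blocking mode without a prior error) is it also
 * mirrored to the target via do_sync_target_write().  A target-side
 * error therefore never fails the guest request; it is handled through
 * mirror_error_action() like any other target error. */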
static int coroutine_fn bdrv_mirror_top_do_write(BlockDriverState *bs,
    MirrorMethod method, uint64_t offset, uint64_t bytes, QEMUIOVector *qiov,
    int flags)
{
    MirrorOp *op = NULL;
    MirrorBDSOpaque *s = bs->opaque;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        op = active_write_prepare(s->job, offset, bytes);
    }

    switch (method) {
    case MIRROR_METHOD_COPY:
        ret = bdrv_co_pwritev(bs->backing, offset, bytes, qiov, flags);
        break;

    case MIRROR_METHOD_ZERO:
        ret = bdrv_co_pwrite_zeroes(bs->backing, offset, bytes, flags);
        break;

    case MIRROR_METHOD_DISCARD:
        ret = bdrv_co_pdiscard(bs->backing, offset, bytes);
        break;

    default:
        abort();
    }

    if (ret < 0) {
        goto out;
    }

    if (copy_to_target) {
        do_sync_target_write(s->job, method, offset, bytes, qiov, flags);
    }

out:
    if (copy_to_target) {
        active_write_settle(op);
    }
    return ret;
}

static int coroutine_fn bdrv_mirror_top_pwritev(BlockDriverState *bs,
    uint64_t offset, uint64_t bytes, QEMUIOVector *qiov, int flags)
{
    MirrorBDSOpaque *s = bs->opaque;
    QEMUIOVector bounce_qiov;
    void *bounce_buf;
    int ret = 0;
    bool copy_to_target;

    copy_to_target = s->job->ret >= 0 &&
                     s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING;

    if (copy_to_target) {
        /* The guest might concurrently modify the data to write; but
         * the data on source and destination must match, so we have
         * to use a bounce buffer if we are going to write to the
         * target now. */
        bounce_buf = qemu_blockalign(bs, bytes);
        iov_to_buf_full(qiov->iov, qiov->niov, 0, bounce_buf, bytes);

        qemu_iovec_init(&bounce_qiov, 1);
        qemu_iovec_add(&bounce_qiov, bounce_buf, bytes);
        qiov = &bounce_qiov;
    }

    ret = bdrv_mirror_top_do_write(bs, MIRROR_METHOD_COPY, offset, bytes, qiov,
                                   flags);

    if (copy_to_target) {
        qemu_iovec_destroy(&bounce_qiov);
        qemu_vfree(bounce_buf);
    }

    return ret;
}

static int coroutine_fn bdrv_mirror_top_flush(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_append in mirror_start_job */
        return 0;
    }
    return bdrv_co_flush(bs->backing->bs);
}

static int coroutine_fn bdrv_mirror_top_pwrite_zeroes(BlockDriverState *bs,
    int64_t offset, int bytes, BdrvRequestFlags flags)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_ZERO, offset, bytes, NULL,
                                    flags);
}

static int coroutine_fn bdrv_mirror_top_pdiscard(BlockDriverState *bs,
    int64_t offset, int bytes)
{
    return bdrv_mirror_top_do_write(bs, MIRROR_METHOD_DISCARD, offset, bytes,
                                    NULL, 0);
}

static void bdrv_mirror_top_refresh_filename(BlockDriverState *bs)
{
    if (bs->backing == NULL) {
        /* we can be here after failed bdrv_attach_child in
         * bdrv_set_backing_hd */
        return;
    }
    pstrcpy(bs->exact_filename, sizeof(bs->exact_filename),
            bs->backing->bs->filename);
}

static void bdrv_mirror_top_child_perm(BlockDriverState *bs, BdrvChild *c,
                                       const BdrvChildRole *role,
                                       BlockReopenQueue *reopen_queue,
                                       uint64_t perm, uint64_t shared,
                                       uint64_t *nperm, uint64_t *nshared)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s->stop) {
        /*
         * If the job is to be stopped, we do not need to forward
         * anything to the real image.
         */
        *nperm = 0;
        *nshared = BLK_PERM_ALL;
        return;
    }

    /* Must be able to forward guest writes to the real image */
    *nperm = 0;
    if (perm & BLK_PERM_WRITE) {
        *nperm |= BLK_PERM_WRITE;
    }

    *nshared = BLK_PERM_ALL;
}

static void bdrv_mirror_top_refresh_limits(BlockDriverState *bs, Error **errp)
{
    MirrorBDSOpaque *s = bs->opaque;

    if (s && s->job && s->job->copy_mode == MIRROR_COPY_MODE_WRITE_BLOCKING) {
        bs->bl.request_alignment = s->job->granularity;
    }
}

/* Dummy node that provides consistent read to its users without requiring it
 * from its backing file and that allows writes on the backing file chain. */
static BlockDriver bdrv_mirror_top = {
    .format_name                = "mirror_top",
    .bdrv_co_preadv             = bdrv_mirror_top_preadv,
    .bdrv_co_pwritev            = bdrv_mirror_top_pwritev,
    .bdrv_co_pwrite_zeroes      = bdrv_mirror_top_pwrite_zeroes,
    .bdrv_co_pdiscard           = bdrv_mirror_top_pdiscard,
    .bdrv_co_flush              = bdrv_mirror_top_flush,
    .bdrv_co_block_status       = bdrv_co_block_status_from_backing,
    .bdrv_refresh_filename      = bdrv_mirror_top_refresh_filename,
    .bdrv_child_perm            = bdrv_mirror_top_child_perm,
    .bdrv_refresh_limits        = bdrv_mirror_top_refresh_limits,
};

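/* Common setup shared by mirror and active commit: insert the mirror_top
 * filter above @bs, create the block job and the target BlockBackend,
 * and wire up the necessary permissions.  Returns the new job, or NULL
 * on error with @errp set. */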
1517static BlockJob *mirror_start_job(
1518                             const char *job_id, BlockDriverState *bs,
1519                             int creation_flags, BlockDriverState *target,
1520                             const char *replaces, int64_t speed,
1521                             uint32_t granularity, int64_t buf_size,
1522                             BlockMirrorBackingMode backing_mode,
1523                             BlockdevOnError on_source_error,
1524                             BlockdevOnError on_target_error,
1525                             bool unmap,
1526                             BlockCompletionFunc *cb,
1527                             void *opaque,
1528                             const BlockJobDriver *driver,
1529                             bool is_none_mode, BlockDriverState *base,
1530                             bool auto_complete, const char *filter_node_name,
1531                             bool is_mirror, MirrorCopyMode copy_mode,
1532                             Error **errp)
1533{
1534    MirrorBlockJob *s;
1535    MirrorBDSOpaque *bs_opaque;
1536    BlockDriverState *mirror_top_bs;
1537    bool target_graph_mod;
1538    bool target_is_backing;
1539    Error *local_err = NULL;
1540    int ret;
1541
1542    if (granularity == 0) {
1543        granularity = bdrv_get_default_bitmap_granularity(target);
1544    }
1545
1546    assert(is_power_of_2(granularity));
1547
1548    if (buf_size < 0) {
1549        error_setg(errp, "Invalid parameter 'buf-size'");
1550        return NULL;
1551    }
1552
1553    if (buf_size == 0) {
1554        buf_size = DEFAULT_MIRROR_BUF_SIZE;
1555    }
1556
1557    if (bs == target) {
1558        error_setg(errp, "Can't mirror node into itself");
1559        return NULL;
1560    }
1561
1562    /* In the case of active commit, add the dummy driver to provide
1563     * consistent reads on top while dropping that requirement from the
1564     * intermediate nodes, and make the backing chain writable. */
1565    mirror_top_bs = bdrv_new_open_driver(&bdrv_mirror_top, filter_node_name,
1566                                         BDRV_O_RDWR, errp);
1567    if (mirror_top_bs == NULL) {
1568        return NULL;
1569    }
1570    if (!filter_node_name) {
1571        mirror_top_bs->implicit = true;
1572    }
1573
1574    /* So that we can always drop this node */
1575    mirror_top_bs->never_freeze = true;
1576
1577    mirror_top_bs->total_sectors = bs->total_sectors;
1578    mirror_top_bs->supported_write_flags = BDRV_REQ_WRITE_UNCHANGED;
1579    mirror_top_bs->supported_zero_flags = BDRV_REQ_WRITE_UNCHANGED |
1580                                          BDRV_REQ_NO_FALLBACK;
1581    bs_opaque = g_new0(MirrorBDSOpaque, 1);
1582    mirror_top_bs->opaque = bs_opaque;
1583
1584    /* bdrv_append takes ownership of the mirror_top_bs reference; keep it
1585     * alive until block_job_create() succeeds, even if bs has no parent. */
1586    bdrv_ref(mirror_top_bs);
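        /* Insert the filter while the source is drained so that no request
         * is in flight across the graph change. */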
1587    bdrv_drained_begin(bs);
1588    bdrv_append(mirror_top_bs, bs, &local_err);
1589    bdrv_drained_end(bs);
1590
1591    if (local_err) {
1592        bdrv_unref(mirror_top_bs);
1593        error_propagate(errp, local_err);
1594        return NULL;
1595    }
1596
1597    /* Make sure that the source is not resized while the job is running */
1598    s = block_job_create(job_id, driver, NULL, mirror_top_bs,
1599                         BLK_PERM_CONSISTENT_READ,
1600                         BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE_UNCHANGED |
1601                         BLK_PERM_WRITE | BLK_PERM_GRAPH_MOD, speed,
1602                         creation_flags, cb, opaque, errp);
1603    if (!s) {
1604        goto fail;
1605    }
1606    bs_opaque->job = s;
1607
1608    /* The block job now has a reference to this node */
1609    bdrv_unref(mirror_top_bs);
1610
1611    s->mirror_top_bs = mirror_top_bs;
1612
1613    /* No resize for the target either; while the mirror is still running, a
1614     * consistent read isn't necessarily possible. We could possibly allow
1615     * writes and graph modifications, though it would likely defeat the
1616     * purpose of a mirror, so leave them blocked for now.
1617     *
1618     * In the case of active commit, things look a bit different, though,
1619     * because the target is an already populated backing file in active use.
1620     * We can allow anything except resize there. */
1621    target_is_backing = bdrv_chain_contains(bs, target);
1622    target_graph_mod = (backing_mode != MIRROR_LEAVE_BACKING_CHAIN);
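        /* BLK_PERM_GRAPH_MOD is requested on the target whenever completing
         * the job will rearrange the graph around it, i.e. unless the
         * backing chain is to be left alone. */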
1623    s->target = blk_new(s->common.job.aio_context,
1624                        BLK_PERM_WRITE | BLK_PERM_RESIZE |
1625                        (target_graph_mod ? BLK_PERM_GRAPH_MOD : 0),
1626                        BLK_PERM_WRITE_UNCHANGED |
1627                        (target_is_backing ? BLK_PERM_CONSISTENT_READ |
1628                                             BLK_PERM_WRITE |
1629                                             BLK_PERM_GRAPH_MOD : 0));
1630    ret = blk_insert_bs(s->target, target, errp);
1631    if (ret < 0) {
1632        goto fail;
1633    }
1634    if (is_mirror) {
1635        /* XXX: The mirror target could be an NBD server of the target QEMU
1636         * in the case of non-shared block migration. To allow migration to
1637         * complete, we have to allow "inactivate" of the target BB.  When
1638         * that happens, we know the job is drained and the vcpus are
1639         * stopped, so no write operation will be performed. The block layer
1640         * already has assertions to ensure that. */
1641        blk_set_force_allow_inactivate(s->target);
1642    }
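        /* The job may be moved to a different AioContext (e.g. when a
         * dataplane device is attached to the source), so the target has to
         * be allowed to follow. */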
1643    blk_set_allow_aio_context_change(s->target, true);
1644
1645    s->replaces = g_strdup(replaces);
1646    s->on_source_error = on_source_error;
1647    s->on_target_error = on_target_error;
1648    s->is_none_mode = is_none_mode;
1649    s->backing_mode = backing_mode;
1650    s->copy_mode = copy_mode;
1651    s->base = base;
1652    s->granularity = granularity;
1653    s->buf_size = ROUND_UP(buf_size, granularity);
1654    s->unmap = unmap;
1655    if (auto_complete) {
1656        s->should_complete = true;
1657    }
1658
1659    /*
1660     * Must be called before we start tracking writes, but after
1661     *
1662     *     ((MirrorBlockJob *)
1663     *         ((MirrorBDSOpaque *)
1664     *             mirror_top_bs->opaque
1665     *         )->job
1666     *     )->copy_mode
1667     *
1668     * has the correct value.
1669     * (We start tracking writes as of the following
1670     * bdrv_create_dirty_bitmap() call.)
1671     */
1672    bdrv_refresh_limits(mirror_top_bs, &local_err);
1673    if (local_err) {
1674        error_propagate(errp, local_err);
1675        goto fail;
1676    }
1677
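        /* From here on, guest writes to @bs are tracked by the dirty
         * bitmap; which ranges start out dirty is decided later by the job
         * coroutine, depending on the sync mode. */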
1678    s->dirty_bitmap = bdrv_create_dirty_bitmap(bs, granularity, NULL, errp);
1679    if (!s->dirty_bitmap) {
1680        goto fail;
1681    }
1682
1683    ret = block_job_add_bdrv(&s->common, "source", bs, 0,
1684                             BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE |
1685                             BLK_PERM_CONSISTENT_READ,
1686                             errp);
1687    if (ret < 0) {
1688        goto fail;
1689    }
1690
1691    /* Required permissions are already taken with blk_new() */
1692    block_job_add_bdrv(&s->common, "target", target, 0, BLK_PERM_ALL,
1693                       &error_abort);
1694
1695    /* In commit_active_start() all intermediate nodes disappear, so any
1696     * jobs using them must be blocked */
1697    if (target_is_backing) {
1698        BlockDriverState *iter;
1699        for (iter = backing_bs(bs); iter != target; iter = backing_bs(iter)) {
1700            /* XXX BLK_PERM_WRITE needs to be allowed so we don't block
1701             * ourselves at s->base (if writes are blocked for a node, they are
1702             * also blocked for its backing file). The other option would be a
1703             * second filter driver above s->base (== target). */
1704            ret = block_job_add_bdrv(&s->common, "intermediate node", iter, 0,
1705                                     BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE,
1706                                     errp);
1707            if (ret < 0) {
1708                goto fail;
1709            }
1710        }
1711
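            /* Freezing the chain keeps the backing links between the filter
             * and the target from being modified while the job runs. */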
1712        if (bdrv_freeze_backing_chain(mirror_top_bs, target, errp) < 0) {
1713            goto fail;
1714        }
1715    }
1716
1717    QTAILQ_INIT(&s->ops_in_flight);
1718
1719    trace_mirror_start(bs, s, opaque);
1720    job_start(&s->common.job);
1721
1722    return &s->common;
1723
1724fail:
1725    if (s) {
1726        /* Make sure this BDS does not go away until we have completed the graph
1727         * changes below */
1728        bdrv_ref(mirror_top_bs);
1729
1730        g_free(s->replaces);
1731        blk_unref(s->target);
1732        bs_opaque->job = NULL;
1733        if (s->dirty_bitmap) {
1734            bdrv_release_dirty_bitmap(bs, s->dirty_bitmap);
1735        }
1736        job_early_fail(&s->common.job);
1737    }
1738
1739    bs_opaque->stop = true;
1740    bdrv_child_refresh_perms(mirror_top_bs, mirror_top_bs->backing,
1741                             &error_abort);
1742    bdrv_replace_node(mirror_top_bs, backing_bs(mirror_top_bs), &error_abort);
1743
1744    bdrv_unref(mirror_top_bs);
1745
1746    return NULL;
1747}
1748
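    /*
     * Illustrative sketch only (editor's addition, with hypothetical
     * values): a QMP drive-mirror with default settings ends up calling
     * the function below roughly as
     *
     *     mirror_start("job0", bs, target_bs, NULL, JOB_DEFAULT, 0, 0, 0,
     *                  MIRROR_SYNC_MODE_FULL, MIRROR_OPEN_BACKING_CHAIN,
     *                  BLOCKDEV_ON_ERROR_REPORT, BLOCKDEV_ON_ERROR_REPORT,
     *                  true, NULL, MIRROR_COPY_MODE_BACKGROUND, errp);
     *
     * where "job0" and target_bs stand in for caller-provided values.
     */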
1749void mirror_start(const char *job_id, BlockDriverState *bs,
1750                  BlockDriverState *target, const char *replaces,
1751                  int creation_flags, int64_t speed,
1752                  uint32_t granularity, int64_t buf_size,
1753                  MirrorSyncMode mode, BlockMirrorBackingMode backing_mode,
1754                  BlockdevOnError on_source_error,
1755                  BlockdevOnError on_target_error,
1756                  bool unmap, const char *filter_node_name,
1757                  MirrorCopyMode copy_mode, Error **errp)
1758{
1759    bool is_none_mode;
1760    BlockDriverState *base;
1761
1762    if (mode == MIRROR_SYNC_MODE_INCREMENTAL) {
1763        error_setg(errp, "Sync mode 'incremental' not supported");
1764        return;
1765    }
1766    is_none_mode = mode == MIRROR_SYNC_MODE_NONE;
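        /* sync=top only needs to copy data allocated in the top image, so
         * the first backing file serves as the job's base; with sync=full
         * and sync=none there is no base and the whole chain is considered. */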
1767    base = mode == MIRROR_SYNC_MODE_TOP ? backing_bs(bs) : NULL;
1768    mirror_start_job(job_id, bs, creation_flags, target, replaces,
1769                     speed, granularity, buf_size, backing_mode,
1770                     on_source_error, on_target_error, unmap, NULL, NULL,
1771                     &mirror_job_driver, is_none_mode, base, false,
1772                     filter_node_name, true, copy_mode, errp);
1773}
1774
1775BlockJob *commit_active_start(const char *job_id, BlockDriverState *bs,
1776                              BlockDriverState *base, int creation_flags,
1777                              int64_t speed, BlockdevOnError on_error,
1778                              const char *filter_node_name,
1779                              BlockCompletionFunc *cb, void *opaque,
1780                              bool auto_complete, Error **errp)
1781{
1782    bool base_read_only;
1783    Error *local_err = NULL;
1784    BlockJob *ret;
1785
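        /* Active commit writes data from the top image into the base, so
         * the base must be writable for the duration of the job; the
         * original read-only setting is restored on the failure path
         * below. */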
1786    base_read_only = bdrv_is_read_only(base);
1787
1788    if (base_read_only) {
1789        if (bdrv_reopen_set_read_only(base, false, errp) < 0) {
1790            return NULL;
1791        }
1792    }
1793
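        /* Active commit is set up as a mirror job whose target is the
         * existing base image: background copy mode, the backing chain is
         * left in place, and unmap is enabled. */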
1794    ret = mirror_start_job(
1795                     job_id, bs, creation_flags, base, NULL, speed, 0, 0,
1796                     MIRROR_LEAVE_BACKING_CHAIN,
1797                     on_error, on_error, true, cb, opaque,
1798                     &commit_active_job_driver, false, base, auto_complete,
1799                     filter_node_name, false, MIRROR_COPY_MODE_BACKGROUND,
1800                     &local_err);
1801    if (local_err) {
1802        error_propagate(errp, local_err);
1803        goto error_restore_flags;
1804    }
1805
1806    return ret;
1807
1808error_restore_flags:
1809    /* Ignore errors from bdrv_reopen_set_read_only() here; we want to
1810     * propagate the original error */
1811    if (base_read_only) {
1812        bdrv_reopen_set_read_only(base, true, NULL);
1813    }
1814    return NULL;
1815}
1816