qemu/block/io.c
   1/*
   2 * Block layer I/O functions
   3 *
   4 * Copyright (c) 2003 Fabrice Bellard
   5 *
   6 * Permission is hereby granted, free of charge, to any person obtaining a copy
   7 * of this software and associated documentation files (the "Software"), to deal
   8 * in the Software without restriction, including without limitation the rights
   9 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
  10 * copies of the Software, and to permit persons to whom the Software is
  11 * furnished to do so, subject to the following conditions:
  12 *
  13 * The above copyright notice and this permission notice shall be included in
  14 * all copies or substantial portions of the Software.
  15 *
  16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
  17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
  18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
  19 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
  20 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
  21 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
  22 * THE SOFTWARE.
  23 */
  24
  25#include "qemu/osdep.h"
  26#include "trace.h"
  27#include "sysemu/block-backend.h"
  28#include "block/blockjob.h"
  29#include "block/blockjob_int.h"
  30#include "block/block_int.h"
  31#include "qemu/cutils.h"
  32#include "qapi/error.h"
  33#include "qemu/error-report.h"
  34
  35#define NOT_DONE 0x7fffffff /* used while an emulated sync operation is in progress */
  36
  37/* Maximum bounce buffer for copy-on-read and write zeroes, in bytes */
  38#define MAX_BOUNCE_BUFFER (32768 << BDRV_SECTOR_BITS)
  39
  40static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
  41    int64_t offset, int bytes, BdrvRequestFlags flags);
  42
  43void bdrv_parent_drained_begin(BlockDriverState *bs)
  44{
  45    BdrvChild *c;
  46
  47    QLIST_FOREACH(c, &bs->parents, next_parent) {
  48        if (c->role->drained_begin) {
  49            c->role->drained_begin(c);
  50        }
  51    }
  52}
  53
  54void bdrv_parent_drained_end(BlockDriverState *bs)
  55{
  56    BdrvChild *c;
  57
  58    QLIST_FOREACH(c, &bs->parents, next_parent) {
  59        if (c->role->drained_end) {
  60            c->role->drained_end(c);
  61        }
  62    }
  63}
  64
  65static void bdrv_merge_limits(BlockLimits *dst, const BlockLimits *src)
  66{
  67    dst->opt_transfer = MAX(dst->opt_transfer, src->opt_transfer);
  68    dst->max_transfer = MIN_NON_ZERO(dst->max_transfer, src->max_transfer);
  69    dst->opt_mem_alignment = MAX(dst->opt_mem_alignment,
  70                                 src->opt_mem_alignment);
  71    dst->min_mem_alignment = MAX(dst->min_mem_alignment,
  72                                 src->min_mem_alignment);
  73    dst->max_iov = MIN_NON_ZERO(dst->max_iov, src->max_iov);
  74}
  75
  76void bdrv_refresh_limits(BlockDriverState *bs, Error **errp)
  77{
  78    BlockDriver *drv = bs->drv;
  79    Error *local_err = NULL;
  80
  81    memset(&bs->bl, 0, sizeof(bs->bl));
  82
  83    if (!drv) {
  84        return;
  85    }
  86
  87    /* Default alignment based on whether driver has byte interface */
  88    bs->bl.request_alignment = drv->bdrv_co_preadv ? 1 : 512;
  89
  90    /* Take some limits from the children as a default */
  91    if (bs->file) {
  92        bdrv_refresh_limits(bs->file->bs, &local_err);
  93        if (local_err) {
  94            error_propagate(errp, local_err);
  95            return;
  96        }
  97        bdrv_merge_limits(&bs->bl, &bs->file->bs->bl);
  98    } else {
  99        bs->bl.min_mem_alignment = 512;
 100        bs->bl.opt_mem_alignment = getpagesize();
 101
 102        /* Safe default since most protocols use readv()/writev()/etc */
 103        bs->bl.max_iov = IOV_MAX;
 104    }
 105
 106    if (bs->backing) {
 107        bdrv_refresh_limits(bs->backing->bs, &local_err);
 108        if (local_err) {
 109            error_propagate(errp, local_err);
 110            return;
 111        }
 112        bdrv_merge_limits(&bs->bl, &bs->backing->bs->bl);
 113    }
 114
 115    /* Then let the driver override it */
 116    if (drv->bdrv_refresh_limits) {
 117        drv->bdrv_refresh_limits(bs, errp);
 118    }
 119}
 120
 121/**
 122 * The copy-on-read flag is actually a reference count so multiple users may
 123 * use the feature without worrying about clobbering its previous state.
 124 * Copy-on-read stays enabled until all users have called to disable it.
 125 */
 126void bdrv_enable_copy_on_read(BlockDriverState *bs)
 127{
 128    atomic_inc(&bs->copy_on_read);
 129}
 130
 131void bdrv_disable_copy_on_read(BlockDriverState *bs)
 132{
 133    int old = atomic_fetch_dec(&bs->copy_on_read);
 134    assert(old >= 1);
 135}
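
/*
 * Illustrative sketch (editorial example, not part of the original io.c):
 * because the copy-on-read flag is a reference count, two independent users
 * can simply pair their enable/disable calls without coordinating.  "bs" is
 * assumed to be a valid BlockDriverState owned by the caller:
 *
 *     bdrv_enable_copy_on_read(bs);     // user A
 *     bdrv_enable_copy_on_read(bs);     // user B
 *     bdrv_disable_copy_on_read(bs);    // user A done, COR stays enabled
 *     bdrv_disable_copy_on_read(bs);    // user B done, COR now disabled
 */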
 136
 137/* Check if any requests are in-flight (including throttled requests) */
 138bool bdrv_requests_pending(BlockDriverState *bs)
 139{
 140    BdrvChild *child;
 141
 142    if (atomic_read(&bs->in_flight)) {
 143        return true;
 144    }
 145
 146    QLIST_FOREACH(child, &bs->children, next) {
 147        if (bdrv_requests_pending(child->bs)) {
 148            return true;
 149        }
 150    }
 151
 152    return false;
 153}
 154
 155typedef struct {
 156    Coroutine *co;
 157    BlockDriverState *bs;
 158    bool done;
 159} BdrvCoDrainData;
 160
 161static void coroutine_fn bdrv_drain_invoke_entry(void *opaque)
 162{
 163    BdrvCoDrainData *data = opaque;
 164    BlockDriverState *bs = data->bs;
 165
 166    bs->drv->bdrv_co_drain(bs);
 167
 168    /* Set data->done before reading bs->wakeup.  */
 169    atomic_mb_set(&data->done, true);
 170    bdrv_wakeup(bs);
 171}
 172
 173static void bdrv_drain_invoke(BlockDriverState *bs)
 174{
 175    BdrvCoDrainData data = { .bs = bs, .done = false };
 176
 177    if (!bs->drv || !bs->drv->bdrv_co_drain) {
 178        return;
 179    }
 180
 181    data.co = qemu_coroutine_create(bdrv_drain_invoke_entry, &data);
 182    bdrv_coroutine_enter(bs, data.co);
 183    BDRV_POLL_WHILE(bs, !data.done);
 184}
 185
 186static bool bdrv_drain_recurse(BlockDriverState *bs)
 187{
 188    BdrvChild *child, *tmp;
 189    bool waited;
 190
 191    waited = BDRV_POLL_WHILE(bs, atomic_read(&bs->in_flight) > 0);
 192
 193    /* Ensure any pending metadata writes are submitted to bs->file.  */
 194    bdrv_drain_invoke(bs);
 195
 196    QLIST_FOREACH_SAFE(child, &bs->children, next, tmp) {
 197        BlockDriverState *bs = child->bs;
 198        bool in_main_loop =
 199            qemu_get_current_aio_context() == qemu_get_aio_context();
 200        assert(bs->refcnt > 0);
 201        if (in_main_loop) {
 202            /* In case the recursive bdrv_drain_recurse processes a
 203             * block_job_defer_to_main_loop BH and modifies the graph,
 204             * let's hold a reference to bs until we are done.
 205             *
 206             * IOThread doesn't have such a BH, and it is not safe to call
 207             * bdrv_unref without BQL, so skip doing it there.
 208             */
 209            bdrv_ref(bs);
 210        }
 211        waited |= bdrv_drain_recurse(bs);
 212        if (in_main_loop) {
 213            bdrv_unref(bs);
 214        }
 215    }
 216
 217    return waited;
 218}
 219
 220static void bdrv_co_drain_bh_cb(void *opaque)
 221{
 222    BdrvCoDrainData *data = opaque;
 223    Coroutine *co = data->co;
 224    BlockDriverState *bs = data->bs;
 225
 226    bdrv_dec_in_flight(bs);
 227    bdrv_drained_begin(bs);
 228    data->done = true;
 229    aio_co_wake(co);
 230}
 231
 232static void coroutine_fn bdrv_co_yield_to_drain(BlockDriverState *bs)
 233{
 234    BdrvCoDrainData data;
 235
 236    /* Calling bdrv_drain() from a BH ensures the current coroutine yields and
 237     * other coroutines run if they were queued from
 238     * qemu_co_queue_run_restart(). */
 239
 240    assert(qemu_in_coroutine());
 241    data = (BdrvCoDrainData) {
 242        .co = qemu_coroutine_self(),
 243        .bs = bs,
 244        .done = false,
 245    };
 246    bdrv_inc_in_flight(bs);
 247    aio_bh_schedule_oneshot(bdrv_get_aio_context(bs),
 248                            bdrv_co_drain_bh_cb, &data);
 249
 250    qemu_coroutine_yield();
 251    /* If we are resumed from some other event (such as an aio completion or a
 252     * timer callback), it is a bug in the caller that should be fixed. */
 253    assert(data.done);
 254}
 255
 256void bdrv_drained_begin(BlockDriverState *bs)
 257{
 258    if (qemu_in_coroutine()) {
 259        bdrv_co_yield_to_drain(bs);
 260        return;
 261    }
 262
 263    if (atomic_fetch_inc(&bs->quiesce_counter) == 0) {
 264        aio_disable_external(bdrv_get_aio_context(bs));
 265        bdrv_parent_drained_begin(bs);
 266    }
 267
 268    bdrv_drain_recurse(bs);
 269}
 270
 271void bdrv_drained_end(BlockDriverState *bs)
 272{
 273    assert(bs->quiesce_counter > 0);
 274    if (atomic_fetch_dec(&bs->quiesce_counter) > 1) {
 275        return;
 276    }
 277
 278    bdrv_parent_drained_end(bs);
 279    aio_enable_external(bdrv_get_aio_context(bs));
 280}
 281
 282/*
 283 * Wait for pending requests to complete on a single BlockDriverState subtree,
 284 * and suspend the block driver's internal I/O until the next request arrives.
 285 *
 286 * Note that unlike bdrv_drain_all(), the caller must hold the BlockDriverState's
 287 * AioContext.
 288 *
 289 * Only this BlockDriverState's AioContext is run, so in-flight requests must
 290 * not depend on events in other AioContexts.  If they do, use
 291 * bdrv_drain_all() instead.
 292 */
 293void coroutine_fn bdrv_co_drain(BlockDriverState *bs)
 294{
 295    assert(qemu_in_coroutine());
 296    bdrv_drained_begin(bs);
 297    bdrv_drained_end(bs);
 298}
 299
 300void bdrv_drain(BlockDriverState *bs)
 301{
 302    bdrv_drained_begin(bs);
 303    bdrv_drained_end(bs);
 304}
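
/*
 * Illustrative sketch (editorial example, not part of the original io.c): a
 * caller that needs the node to stay quiescent for a whole critical section
 * uses the begin/end pair directly rather than a bare bdrv_drain(), which only
 * drains momentarily.  "bs" is assumed to be a valid BlockDriverState:
 *
 *     bdrv_drained_begin(bs);
 *     // ... inspect or reconfigure bs; no new requests are processed here ...
 *     bdrv_drained_end(bs);
 *
 * The pair nests via bs->quiesce_counter, so independent callers may overlap
 * their drained sections safely.
 */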
 305
 306/*
 307 * Wait for pending requests to complete across all BlockDriverStates
 308 *
 309 * This function does not flush data to disk, use bdrv_flush_all() for that
 310 * after calling this function.
 311 *
 312 * This pauses all block jobs and disables external clients. It must
 313 * be paired with bdrv_drain_all_end().
 314 *
 315 * NOTE: no new block jobs or BlockDriverStates can be created between
 316 * the bdrv_drain_all_begin() and bdrv_drain_all_end() calls.
 317 */
 318void bdrv_drain_all_begin(void)
 319{
 320    /* Always run first iteration so any pending completion BHs run */
 321    bool waited = true;
 322    BlockDriverState *bs;
 323    BdrvNextIterator it;
 324    GSList *aio_ctxs = NULL, *ctx;
 325
 326    block_job_pause_all();
 327
 328    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 329        AioContext *aio_context = bdrv_get_aio_context(bs);
 330
 331        aio_context_acquire(aio_context);
 332        bdrv_parent_drained_begin(bs);
 333        aio_disable_external(aio_context);
 334        aio_context_release(aio_context);
 335
 336        if (!g_slist_find(aio_ctxs, aio_context)) {
 337            aio_ctxs = g_slist_prepend(aio_ctxs, aio_context);
 338        }
 339    }
 340
 341    /* Note that completion of an asynchronous I/O operation can trigger any
 342     * number of other I/O operations on other devices---for example a
 343     * coroutine can submit an I/O request to another device in response to
 344     * request completion.  Therefore we must keep looping until there was no
 345     * more activity rather than simply draining each device independently.
 346     */
 347    while (waited) {
 348        waited = false;
 349
 350        for (ctx = aio_ctxs; ctx != NULL; ctx = ctx->next) {
 351            AioContext *aio_context = ctx->data;
 352
 353            aio_context_acquire(aio_context);
 354            for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 355                if (aio_context == bdrv_get_aio_context(bs)) {
 356                    waited |= bdrv_drain_recurse(bs);
 357                }
 358            }
 359            aio_context_release(aio_context);
 360        }
 361    }
 362
 363    g_slist_free(aio_ctxs);
 364}
 365
 366void bdrv_drain_all_end(void)
 367{
 368    BlockDriverState *bs;
 369    BdrvNextIterator it;
 370
 371    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 372        AioContext *aio_context = bdrv_get_aio_context(bs);
 373
 374        aio_context_acquire(aio_context);
 375        aio_enable_external(aio_context);
 376        bdrv_parent_drained_end(bs);
 377        aio_context_release(aio_context);
 378    }
 379
 380    block_job_resume_all();
 381}
 382
 383void bdrv_drain_all(void)
 384{
 385    bdrv_drain_all_begin();
 386    bdrv_drain_all_end();
 387}
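
/*
 * Illustrative sketch (editorial example, not part of the original io.c): for
 * operations that must quiesce every node in the system, the global variant is
 * used the same way as the per-node drained section:
 *
 *     bdrv_drain_all_begin();
 *     // ... no BlockDriverState in the system processes requests here ...
 *     bdrv_drain_all_end();
 */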
 388
 389/**
 390 * Remove an active request from the tracked requests list
 391 *
 392 * This function should be called when a tracked request is completing.
 393 */
 394static void tracked_request_end(BdrvTrackedRequest *req)
 395{
 396    if (req->serialising) {
 397        atomic_dec(&req->bs->serialising_in_flight);
 398    }
 399
 400    qemu_co_mutex_lock(&req->bs->reqs_lock);
 401    QLIST_REMOVE(req, list);
 402    qemu_co_queue_restart_all(&req->wait_queue);
 403    qemu_co_mutex_unlock(&req->bs->reqs_lock);
 404}
 405
 406/**
 407 * Add an active request to the tracked requests list
 408 */
 409static void tracked_request_begin(BdrvTrackedRequest *req,
 410                                  BlockDriverState *bs,
 411                                  int64_t offset,
 412                                  unsigned int bytes,
 413                                  enum BdrvTrackedRequestType type)
 414{
 415    *req = (BdrvTrackedRequest){
 416        .bs = bs,
 417        .offset         = offset,
 418        .bytes          = bytes,
 419        .type           = type,
 420        .co             = qemu_coroutine_self(),
 421        .serialising    = false,
 422        .overlap_offset = offset,
 423        .overlap_bytes  = bytes,
 424    };
 425
 426    qemu_co_queue_init(&req->wait_queue);
 427
 428    qemu_co_mutex_lock(&bs->reqs_lock);
 429    QLIST_INSERT_HEAD(&bs->tracked_requests, req, list);
 430    qemu_co_mutex_unlock(&bs->reqs_lock);
 431}
 432
 433static void mark_request_serialising(BdrvTrackedRequest *req, uint64_t align)
 434{
 435    int64_t overlap_offset = req->offset & ~(align - 1);
 436    unsigned int overlap_bytes = ROUND_UP(req->offset + req->bytes, align)
 437                               - overlap_offset;
 438
 439    if (!req->serialising) {
 440        atomic_inc(&req->bs->serialising_in_flight);
 441        req->serialising = true;
 442    }
 443
 444    req->overlap_offset = MIN(req->overlap_offset, overlap_offset);
 445    req->overlap_bytes = MAX(req->overlap_bytes, overlap_bytes);
 446}
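
/*
 * Editor's worked example (not part of the original io.c): with align = 4096,
 * a request at offset 4097 with 100 bytes gets
 *
 *     overlap_offset = 4097 & ~4095                 = 4096
 *     overlap_bytes  = ROUND_UP(4197, 4096) - 4096  = 4096
 *
 * i.e. the serialising window is widened to the containing aligned block(s),
 * which is what makes the overlap check below conservative enough.
 */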
 447
 448/**
 449 * Round a region to cluster boundaries
 450 */
 451void bdrv_round_to_clusters(BlockDriverState *bs,
 452                            int64_t offset, unsigned int bytes,
 453                            int64_t *cluster_offset,
 454                            unsigned int *cluster_bytes)
 455{
 456    BlockDriverInfo bdi;
 457
 458    if (bdrv_get_info(bs, &bdi) < 0 || bdi.cluster_size == 0) {
 459        *cluster_offset = offset;
 460        *cluster_bytes = bytes;
 461    } else {
 462        int64_t c = bdi.cluster_size;
 463        *cluster_offset = QEMU_ALIGN_DOWN(offset, c);
 464        *cluster_bytes = QEMU_ALIGN_UP(offset - *cluster_offset + bytes, c);
 465    }
 466}
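
/*
 * Editor's worked example (not part of the original io.c): with a 64 KiB
 * cluster size, a request at offset 70000 of 1000 bytes rounds to
 *
 *     *cluster_offset = QEMU_ALIGN_DOWN(70000, 65536)              = 65536
 *     *cluster_bytes  = QEMU_ALIGN_UP(70000 - 65536 + 1000, 65536) = 65536
 *
 * so copy-on-read below operates on the whole containing cluster.
 */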
 467
 468static int bdrv_get_cluster_size(BlockDriverState *bs)
 469{
 470    BlockDriverInfo bdi;
 471    int ret;
 472
 473    ret = bdrv_get_info(bs, &bdi);
 474    if (ret < 0 || bdi.cluster_size == 0) {
 475        return bs->bl.request_alignment;
 476    } else {
 477        return bdi.cluster_size;
 478    }
 479}
 480
 481static bool tracked_request_overlaps(BdrvTrackedRequest *req,
 482                                     int64_t offset, unsigned int bytes)
 483{
 484    /*        aaaa   bbbb */
 485    if (offset >= req->overlap_offset + req->overlap_bytes) {
 486        return false;
 487    }
 488    /* bbbb   aaaa        */
 489    if (req->overlap_offset >= offset + bytes) {
 490        return false;
 491    }
 492    return true;
 493}
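
/*
 * Editor's note (not part of the original io.c): the two checks treat
 * [overlap_offset, overlap_offset + overlap_bytes) as a half-open interval,
 * so e.g. a request covering [0, 4096) and another covering [4096, 8192) do
 * not overlap, while [0, 4096) and [4095, 8191) do.
 */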
 494
 495void bdrv_inc_in_flight(BlockDriverState *bs)
 496{
 497    atomic_inc(&bs->in_flight);
 498}
 499
 500static void dummy_bh_cb(void *opaque)
 501{
 502}
 503
 504void bdrv_wakeup(BlockDriverState *bs)
 505{
 506    /* The barrier (or an atomic op) is in the caller.  */
 507    if (atomic_read(&bs->wakeup)) {
 508        aio_bh_schedule_oneshot(qemu_get_aio_context(), dummy_bh_cb, NULL);
 509    }
 510}
 511
 512void bdrv_dec_in_flight(BlockDriverState *bs)
 513{
 514    atomic_dec(&bs->in_flight);
 515    bdrv_wakeup(bs);
 516}
 517
 518static bool coroutine_fn wait_serialising_requests(BdrvTrackedRequest *self)
 519{
 520    BlockDriverState *bs = self->bs;
 521    BdrvTrackedRequest *req;
 522    bool retry;
 523    bool waited = false;
 524
 525    if (!atomic_read(&bs->serialising_in_flight)) {
 526        return false;
 527    }
 528
 529    do {
 530        retry = false;
 531        qemu_co_mutex_lock(&bs->reqs_lock);
 532        QLIST_FOREACH(req, &bs->tracked_requests, list) {
 533            if (req == self || (!req->serialising && !self->serialising)) {
 534                continue;
 535            }
 536            if (tracked_request_overlaps(req, self->overlap_offset,
 537                                         self->overlap_bytes))
 538            {
 539                /* Hitting this means there was a reentrant request, for
 540                 * example, a block driver issuing nested requests.  This must
 541                 * never happen since it means deadlock.
 542                 */
 543                assert(qemu_coroutine_self() != req->co);
 544
 545                /* If the request is already (indirectly) waiting for us, or
 546                 * will wait for us as soon as it wakes up, then just go on
 547                 * (instead of producing a deadlock in the former case). */
 548                if (!req->waiting_for) {
 549                    self->waiting_for = req;
 550                    qemu_co_queue_wait(&req->wait_queue, &bs->reqs_lock);
 551                    self->waiting_for = NULL;
 552                    retry = true;
 553                    waited = true;
 554                    break;
 555                }
 556            }
 557        }
 558        qemu_co_mutex_unlock(&bs->reqs_lock);
 559    } while (retry);
 560
 561    return waited;
 562}
 563
 564static int bdrv_check_byte_request(BlockDriverState *bs, int64_t offset,
 565                                   size_t size)
 566{
 567    if (size > BDRV_REQUEST_MAX_SECTORS << BDRV_SECTOR_BITS) {
 568        return -EIO;
 569    }
 570
 571    if (!bdrv_is_inserted(bs)) {
 572        return -ENOMEDIUM;
 573    }
 574
 575    if (offset < 0) {
 576        return -EIO;
 577    }
 578
 579    return 0;
 580}
 581
 582typedef struct RwCo {
 583    BdrvChild *child;
 584    int64_t offset;
 585    QEMUIOVector *qiov;
 586    bool is_write;
 587    int ret;
 588    BdrvRequestFlags flags;
 589} RwCo;
 590
 591static void coroutine_fn bdrv_rw_co_entry(void *opaque)
 592{
 593    RwCo *rwco = opaque;
 594
 595    if (!rwco->is_write) {
 596        rwco->ret = bdrv_co_preadv(rwco->child, rwco->offset,
 597                                   rwco->qiov->size, rwco->qiov,
 598                                   rwco->flags);
 599    } else {
 600        rwco->ret = bdrv_co_pwritev(rwco->child, rwco->offset,
 601                                    rwco->qiov->size, rwco->qiov,
 602                                    rwco->flags);
 603    }
 604}
 605
 606/*
 607 * Process a vectored synchronous request using coroutines
 608 */
 609static int bdrv_prwv_co(BdrvChild *child, int64_t offset,
 610                        QEMUIOVector *qiov, bool is_write,
 611                        BdrvRequestFlags flags)
 612{
 613    Coroutine *co;
 614    RwCo rwco = {
 615        .child = child,
 616        .offset = offset,
 617        .qiov = qiov,
 618        .is_write = is_write,
 619        .ret = NOT_DONE,
 620        .flags = flags,
 621    };
 622
 623    if (qemu_in_coroutine()) {
 624        /* Fast-path if already in coroutine context */
 625        bdrv_rw_co_entry(&rwco);
 626    } else {
 627        co = qemu_coroutine_create(bdrv_rw_co_entry, &rwco);
 628        bdrv_coroutine_enter(child->bs, co);
 629        BDRV_POLL_WHILE(child->bs, rwco.ret == NOT_DONE);
 630    }
 631    return rwco.ret;
 632}
 633
 634/*
 635 * Process a synchronous request using coroutines
 636 */
 637static int bdrv_rw_co(BdrvChild *child, int64_t sector_num, uint8_t *buf,
 638                      int nb_sectors, bool is_write, BdrvRequestFlags flags)
 639{
 640    QEMUIOVector qiov;
 641    struct iovec iov = {
 642        .iov_base = (void *)buf,
 643        .iov_len = nb_sectors * BDRV_SECTOR_SIZE,
 644    };
 645
 646    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
 647        return -EINVAL;
 648    }
 649
 650    qemu_iovec_init_external(&qiov, &iov, 1);
 651    return bdrv_prwv_co(child, sector_num << BDRV_SECTOR_BITS,
 652                        &qiov, is_write, flags);
 653}
 654
 655/* return < 0 if error. See bdrv_write() for the return codes */
 656int bdrv_read(BdrvChild *child, int64_t sector_num,
 657              uint8_t *buf, int nb_sectors)
 658{
 659    return bdrv_rw_co(child, sector_num, buf, nb_sectors, false, 0);
 660}
 661
 662/* Return < 0 if error. Important errors are:
 663  -EIO         generic I/O error (may happen for all errors)
 664  -ENOMEDIUM   No media inserted.
 665  -EINVAL      Invalid sector number or nb_sectors
 666  -EACCES      Trying to write a read-only device
 667*/
 668int bdrv_write(BdrvChild *child, int64_t sector_num,
 669               const uint8_t *buf, int nb_sectors)
 670{
 671    return bdrv_rw_co(child, sector_num, (uint8_t *)buf, nb_sectors, true, 0);
 672}
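
/*
 * Illustrative sketch (editorial example, not part of the original io.c): the
 * sector-based helpers above are synchronous convenience wrappers; a caller
 * outside coroutine context can use them directly.  "child" and the sector
 * numbers are assumptions for the example:
 *
 *     uint8_t sector[BDRV_SECTOR_SIZE];
 *     int ret;
 *
 *     ret = bdrv_read(child, 0, sector, 1);      // read sector 0
 *     if (ret < 0) {
 *         return ret;                            // -EIO, -ENOMEDIUM, ...
 *     }
 *     ret = bdrv_write(child, 0, sector, 1);     // write it back
 *
 * Internally both go through bdrv_prwv_co(), so they also work from coroutine
 * context (taking the fast path there).
 */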
 673
 674int bdrv_pwrite_zeroes(BdrvChild *child, int64_t offset,
 675                       int bytes, BdrvRequestFlags flags)
 676{
 677    QEMUIOVector qiov;
 678    struct iovec iov = {
 679        .iov_base = NULL,
 680        .iov_len = bytes,
 681    };
 682
 683    qemu_iovec_init_external(&qiov, &iov, 1);
 684    return bdrv_prwv_co(child, offset, &qiov, true,
 685                        BDRV_REQ_ZERO_WRITE | flags);
 686}
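
/*
 * Illustrative sketch (editorial example, not part of the original io.c):
 * zeroing the first megabyte of an image could look like this; passing
 * BDRV_REQ_MAY_UNMAP merely permits the driver to unmap, the region is still
 * guaranteed to read back as zeroes:
 *
 *     int ret = bdrv_pwrite_zeroes(child, 0, 1024 * 1024, BDRV_REQ_MAY_UNMAP);
 *     if (ret < 0) {
 *         error_report("zeroing failed: %s", strerror(-ret));
 *     }
 */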
 687
 688/*
 689 * Completely zero out a block device with the help of bdrv_pwrite_zeroes.
 690 * The operation is sped up by checking the block status and only writing
 691 * zeroes to regions that do not already read back as zeroes. Optional
 692 * flags are passed through to bdrv_pwrite_zeroes (e.g. BDRV_REQ_MAY_UNMAP,
 693 * BDRV_REQ_FUA).
 694 *
 695 * Returns < 0 on error, 0 on success. For error codes see bdrv_write().
 696 */
 697int bdrv_make_zero(BdrvChild *child, BdrvRequestFlags flags)
 698{
 699    int64_t target_sectors, ret, nb_sectors, sector_num = 0;
 700    BlockDriverState *bs = child->bs;
 701    BlockDriverState *file;
 702    int n;
 703
 704    target_sectors = bdrv_nb_sectors(bs);
 705    if (target_sectors < 0) {
 706        return target_sectors;
 707    }
 708
 709    for (;;) {
 710        nb_sectors = MIN(target_sectors - sector_num, BDRV_REQUEST_MAX_SECTORS);
 711        if (nb_sectors <= 0) {
 712            return 0;
 713        }
 714        ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &n, &file);
 715        if (ret < 0) {
 716            error_report("error getting block status at sector %" PRId64 ": %s",
 717                         sector_num, strerror(-ret));
 718            return ret;
 719        }
 720        if (ret & BDRV_BLOCK_ZERO) {
 721            sector_num += n;
 722            continue;
 723        }
 724        ret = bdrv_pwrite_zeroes(child, sector_num << BDRV_SECTOR_BITS,
 725                                 n << BDRV_SECTOR_BITS, flags);
 726        if (ret < 0) {
 727            error_report("error writing zeroes at sector %" PRId64 ": %s",
 728                         sector_num, strerror(-ret));
 729            return ret;
 730        }
 731        sector_num += n;
 732    }
 733}
 734
 735int bdrv_preadv(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
 736{
 737    int ret;
 738
 739    ret = bdrv_prwv_co(child, offset, qiov, false, 0);
 740    if (ret < 0) {
 741        return ret;
 742    }
 743
 744    return qiov->size;
 745}
 746
 747int bdrv_pread(BdrvChild *child, int64_t offset, void *buf, int bytes)
 748{
 749    QEMUIOVector qiov;
 750    struct iovec iov = {
 751        .iov_base = (void *)buf,
 752        .iov_len = bytes,
 753    };
 754
 755    if (bytes < 0) {
 756        return -EINVAL;
 757    }
 758
 759    qemu_iovec_init_external(&qiov, &iov, 1);
 760    return bdrv_preadv(child, offset, &qiov);
 761}
 762
 763int bdrv_pwritev(BdrvChild *child, int64_t offset, QEMUIOVector *qiov)
 764{
 765    int ret;
 766
 767    ret = bdrv_prwv_co(child, offset, qiov, true, 0);
 768    if (ret < 0) {
 769        return ret;
 770    }
 771
 772    return qiov->size;
 773}
 774
 775int bdrv_pwrite(BdrvChild *child, int64_t offset, const void *buf, int bytes)
 776{
 777    QEMUIOVector qiov;
 778    struct iovec iov = {
 779        .iov_base   = (void *) buf,
 780        .iov_len    = bytes,
 781    };
 782
 783    if (bytes < 0) {
 784        return -EINVAL;
 785    }
 786
 787    qemu_iovec_init_external(&qiov, &iov, 1);
 788    return bdrv_pwritev(child, offset, &qiov);
 789}
 790
 791/*
 792 * Writes to the file and ensures that no writes are reordered across this
 793 * request (acts as a barrier)
 794 *
 795 * Returns 0 on success, -errno in error cases.
 796 */
 797int bdrv_pwrite_sync(BdrvChild *child, int64_t offset,
 798                     const void *buf, int count)
 799{
 800    int ret;
 801
 802    ret = bdrv_pwrite(child, offset, buf, count);
 803    if (ret < 0) {
 804        return ret;
 805    }
 806
 807    ret = bdrv_flush(child->bs);
 808    if (ret < 0) {
 809        return ret;
 810    }
 811
 812    return 0;
 813}
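
/*
 * Illustrative sketch (editorial example, not part of the original io.c):
 * format drivers typically use this barrier variant for ordering-critical
 * metadata, e.g. making sure a new table entry reaches the disk before
 * anything that references it is written.  "table_offset" and "entry" are
 * assumptions for the example:
 *
 *     int ret = bdrv_pwrite_sync(child, table_offset, &entry, sizeof(entry));
 *     if (ret < 0) {
 *         return ret;    // entry may or may not be on disk, but nothing
 *     }                  // written later can have overtaken it
 */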
 814
 815typedef struct CoroutineIOCompletion {
 816    Coroutine *coroutine;
 817    int ret;
 818} CoroutineIOCompletion;
 819
 820static void bdrv_co_io_em_complete(void *opaque, int ret)
 821{
 822    CoroutineIOCompletion *co = opaque;
 823
 824    co->ret = ret;
 825    aio_co_wake(co->coroutine);
 826}
 827
 828static int coroutine_fn bdrv_driver_preadv(BlockDriverState *bs,
 829                                           uint64_t offset, uint64_t bytes,
 830                                           QEMUIOVector *qiov, int flags)
 831{
 832    BlockDriver *drv = bs->drv;
 833    int64_t sector_num;
 834    unsigned int nb_sectors;
 835
 836    assert(!(flags & ~BDRV_REQ_MASK));
 837
 838    if (drv->bdrv_co_preadv) {
 839        return drv->bdrv_co_preadv(bs, offset, bytes, qiov, flags);
 840    }
 841
 842    sector_num = offset >> BDRV_SECTOR_BITS;
 843    nb_sectors = bytes >> BDRV_SECTOR_BITS;
 844
 845    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
 846    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
 847    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
 848
 849    if (drv->bdrv_co_readv) {
 850        return drv->bdrv_co_readv(bs, sector_num, nb_sectors, qiov);
 851    } else {
 852        BlockAIOCB *acb;
 853        CoroutineIOCompletion co = {
 854            .coroutine = qemu_coroutine_self(),
 855        };
 856
 857        acb = bs->drv->bdrv_aio_readv(bs, sector_num, qiov, nb_sectors,
 858                                      bdrv_co_io_em_complete, &co);
 859        if (acb == NULL) {
 860            return -EIO;
 861        } else {
 862            qemu_coroutine_yield();
 863            return co.ret;
 864        }
 865    }
 866}
 867
 868static int coroutine_fn bdrv_driver_pwritev(BlockDriverState *bs,
 869                                            uint64_t offset, uint64_t bytes,
 870                                            QEMUIOVector *qiov, int flags)
 871{
 872    BlockDriver *drv = bs->drv;
 873    int64_t sector_num;
 874    unsigned int nb_sectors;
 875    int ret;
 876
 877    assert(!(flags & ~BDRV_REQ_MASK));
 878
 879    if (drv->bdrv_co_pwritev) {
 880        ret = drv->bdrv_co_pwritev(bs, offset, bytes, qiov,
 881                                   flags & bs->supported_write_flags);
 882        flags &= ~bs->supported_write_flags;
 883        goto emulate_flags;
 884    }
 885
 886    sector_num = offset >> BDRV_SECTOR_BITS;
 887    nb_sectors = bytes >> BDRV_SECTOR_BITS;
 888
 889    assert((offset & (BDRV_SECTOR_SIZE - 1)) == 0);
 890    assert((bytes & (BDRV_SECTOR_SIZE - 1)) == 0);
 891    assert((bytes >> BDRV_SECTOR_BITS) <= BDRV_REQUEST_MAX_SECTORS);
 892
 893    if (drv->bdrv_co_writev_flags) {
 894        ret = drv->bdrv_co_writev_flags(bs, sector_num, nb_sectors, qiov,
 895                                        flags & bs->supported_write_flags);
 896        flags &= ~bs->supported_write_flags;
 897    } else if (drv->bdrv_co_writev) {
 898        assert(!bs->supported_write_flags);
 899        ret = drv->bdrv_co_writev(bs, sector_num, nb_sectors, qiov);
 900    } else {
 901        BlockAIOCB *acb;
 902        CoroutineIOCompletion co = {
 903            .coroutine = qemu_coroutine_self(),
 904        };
 905
 906        acb = bs->drv->bdrv_aio_writev(bs, sector_num, qiov, nb_sectors,
 907                                       bdrv_co_io_em_complete, &co);
 908        if (acb == NULL) {
 909            ret = -EIO;
 910        } else {
 911            qemu_coroutine_yield();
 912            ret = co.ret;
 913        }
 914    }
 915
 916emulate_flags:
 917    if (ret == 0 && (flags & BDRV_REQ_FUA)) {
 918        ret = bdrv_co_flush(bs);
 919    }
 920
 921    return ret;
 922}
 923
 924static int coroutine_fn
 925bdrv_driver_pwritev_compressed(BlockDriverState *bs, uint64_t offset,
 926                               uint64_t bytes, QEMUIOVector *qiov)
 927{
 928    BlockDriver *drv = bs->drv;
 929
 930    if (!drv->bdrv_co_pwritev_compressed) {
 931        return -ENOTSUP;
 932    }
 933
 934    return drv->bdrv_co_pwritev_compressed(bs, offset, bytes, qiov);
 935}
 936
 937static int coroutine_fn bdrv_co_do_copy_on_readv(BdrvChild *child,
 938        int64_t offset, unsigned int bytes, QEMUIOVector *qiov)
 939{
 940    BlockDriverState *bs = child->bs;
 941
 942    /* Perform I/O through a temporary buffer so that users who scribble over
 943     * their read buffer while the operation is in progress do not end up
 944     * modifying the image file.  This is critical for zero-copy guest I/O
 945     * where anything might happen inside guest memory.
 946     */
 947    void *bounce_buffer;
 948
 949    BlockDriver *drv = bs->drv;
 950    struct iovec iov;
 951    QEMUIOVector local_qiov;
 952    int64_t cluster_offset;
 953    unsigned int cluster_bytes;
 954    size_t skip_bytes;
 955    int ret;
 956    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer,
 957                                    BDRV_REQUEST_MAX_BYTES);
 958    unsigned int progress = 0;
 959
 960    /* FIXME We cannot require callers to have write permissions when all they
 961     * are doing is a read request. If we did things right, write permissions
 962     * would be obtained anyway, but internally by the copy-on-read code.
 963     * However, as long as it is implemented here rather than in a separate
 964     * filter driver, the copy-on-read code doesn't have its own BdrvChild for
 965     * which it could request permissions. Therefore we have to bypass the
 966     * permission system for the moment. */
 967    // assert(child->perm & (BLK_PERM_WRITE_UNCHANGED | BLK_PERM_WRITE));
 968
 969    /* Cover entire cluster so no additional backing file I/O is required when
 970     * allocating cluster in the image file.  Note that this value may exceed
 971     * BDRV_REQUEST_MAX_BYTES (even when the original read did not), which
 972     * is one reason we loop rather than doing it all at once.
 973     */
 974    bdrv_round_to_clusters(bs, offset, bytes, &cluster_offset, &cluster_bytes);
 975    skip_bytes = offset - cluster_offset;
 976
 977    trace_bdrv_co_do_copy_on_readv(bs, offset, bytes,
 978                                   cluster_offset, cluster_bytes);
 979
 980    bounce_buffer = qemu_try_blockalign(bs,
 981                                        MIN(MIN(max_transfer, cluster_bytes),
 982                                            MAX_BOUNCE_BUFFER));
 983    if (bounce_buffer == NULL) {
 984        ret = -ENOMEM;
 985        goto err;
 986    }
 987
 988    while (cluster_bytes) {
 989        int64_t pnum;
 990
 991        ret = bdrv_is_allocated(bs, cluster_offset,
 992                                MIN(cluster_bytes, max_transfer), &pnum);
 993        if (ret < 0) {
 994            /* Safe to treat errors in querying allocation as if
 995             * unallocated; we'll probably fail again soon on the
 996             * read, but at least that will set a decent errno.
 997             */
 998            pnum = MIN(cluster_bytes, max_transfer);
 999        }
1000
1001        assert(skip_bytes < pnum);
1002
1003        if (ret <= 0) {
1004            /* Must copy-on-read; use the bounce buffer */
1005            iov.iov_base = bounce_buffer;
1006            iov.iov_len = pnum = MIN(pnum, MAX_BOUNCE_BUFFER);
1007            qemu_iovec_init_external(&local_qiov, &iov, 1);
1008
1009            ret = bdrv_driver_preadv(bs, cluster_offset, pnum,
1010                                     &local_qiov, 0);
1011            if (ret < 0) {
1012                goto err;
1013            }
1014
1015            if (drv->bdrv_co_pwrite_zeroes &&
1016                buffer_is_zero(bounce_buffer, pnum)) {
1017                /* FIXME: Should we (perhaps conditionally) be setting
1018                 * BDRV_REQ_MAY_UNMAP, if it will allow for a sparser copy
1019                 * that still correctly reads as zero? */
1020                ret = bdrv_co_do_pwrite_zeroes(bs, cluster_offset, pnum, 0);
1021            } else {
1022                /* This does not change the data on the disk, it is not
1023                 * necessary to flush even in cache=writethrough mode.
1024                 */
1025                ret = bdrv_driver_pwritev(bs, cluster_offset, pnum,
1026                                          &local_qiov, 0);
1027            }
1028
1029            if (ret < 0) {
1030                /* It might be okay to ignore write errors for guest
1031                 * requests.  If this is a deliberate copy-on-read
1032                 * then we don't want to ignore the error.  Simply
1033                 * report it in all cases.
1034                 */
1035                goto err;
1036            }
1037
1038            qemu_iovec_from_buf(qiov, progress, bounce_buffer + skip_bytes,
1039                                pnum - skip_bytes);
1040        } else {
1041            /* Read directly into the destination */
1042            qemu_iovec_init(&local_qiov, qiov->niov);
1043            qemu_iovec_concat(&local_qiov, qiov, progress, pnum - skip_bytes);
1044            ret = bdrv_driver_preadv(bs, offset + progress, local_qiov.size,
1045                                     &local_qiov, 0);
1046            qemu_iovec_destroy(&local_qiov);
1047            if (ret < 0) {
1048                goto err;
1049            }
1050        }
1051
1052        cluster_offset += pnum;
1053        cluster_bytes -= pnum;
1054        progress += pnum - skip_bytes;
1055        skip_bytes = 0;
1056    }
1057    ret = 0;
1058
1059err:
1060    qemu_vfree(bounce_buffer);
1061    return ret;
1062}
1063
1064/*
1065 * Forwards an already correctly aligned request to the BlockDriver. This
1066 * handles copy on read, zeroing after EOF, and fragmentation of large
1067 * reads; any other features must be implemented by the caller.
1068 */
1069static int coroutine_fn bdrv_aligned_preadv(BdrvChild *child,
1070    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1071    int64_t align, QEMUIOVector *qiov, int flags)
1072{
1073    BlockDriverState *bs = child->bs;
1074    int64_t total_bytes, max_bytes;
1075    int ret = 0;
1076    uint64_t bytes_remaining = bytes;
1077    int max_transfer;
1078
1079    assert(is_power_of_2(align));
1080    assert((offset & (align - 1)) == 0);
1081    assert((bytes & (align - 1)) == 0);
1082    assert(!qiov || bytes == qiov->size);
1083    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1084    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1085                                   align);
1086
1087    /* TODO: We would need a per-BDS .supported_read_flags and
1088     * potential fallback support, if we ever implement any read flags
1089     * to pass through to drivers.  For now, there aren't any
1090     * passthrough flags.  */
1091    assert(!(flags & ~(BDRV_REQ_NO_SERIALISING | BDRV_REQ_COPY_ON_READ)));
1092
1093    /* Handle Copy on Read and associated serialisation */
1094    if (flags & BDRV_REQ_COPY_ON_READ) {
1095        /* If we touch the same cluster it counts as an overlap.  This
1096         * guarantees that allocating writes will be serialized and not race
1097         * with each other for the same cluster.  For example, in copy-on-read
1098         * it ensures that the CoR read and write operations are atomic and
1099         * guest writes cannot interleave between them. */
1100        mark_request_serialising(req, bdrv_get_cluster_size(bs));
1101    }
1102
1103    if (!(flags & BDRV_REQ_NO_SERIALISING)) {
1104        wait_serialising_requests(req);
1105    }
1106
1107    if (flags & BDRV_REQ_COPY_ON_READ) {
1108        /* TODO: Simplify further once bdrv_is_allocated no longer
1109         * requires sector alignment */
1110        int64_t start = QEMU_ALIGN_DOWN(offset, BDRV_SECTOR_SIZE);
1111        int64_t end = QEMU_ALIGN_UP(offset + bytes, BDRV_SECTOR_SIZE);
1112        int64_t pnum;
1113
1114        ret = bdrv_is_allocated(bs, start, end - start, &pnum);
1115        if (ret < 0) {
1116            goto out;
1117        }
1118
1119        if (!ret || pnum != end - start) {
1120            ret = bdrv_co_do_copy_on_readv(child, offset, bytes, qiov);
1121            goto out;
1122        }
1123    }
1124
1125    /* Forward the request to the BlockDriver, possibly fragmenting it */
1126    total_bytes = bdrv_getlength(bs);
1127    if (total_bytes < 0) {
1128        ret = total_bytes;
1129        goto out;
1130    }
1131
1132    max_bytes = ROUND_UP(MAX(0, total_bytes - offset), align);
1133    if (bytes <= max_bytes && bytes <= max_transfer) {
1134        ret = bdrv_driver_preadv(bs, offset, bytes, qiov, 0);
1135        goto out;
1136    }
1137
1138    while (bytes_remaining) {
1139        int num;
1140
1141        if (max_bytes) {
1142            QEMUIOVector local_qiov;
1143
1144            num = MIN(bytes_remaining, MIN(max_bytes, max_transfer));
1145            assert(num);
1146            qemu_iovec_init(&local_qiov, qiov->niov);
1147            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1148
1149            ret = bdrv_driver_preadv(bs, offset + bytes - bytes_remaining,
1150                                     num, &local_qiov, 0);
1151            max_bytes -= num;
1152            qemu_iovec_destroy(&local_qiov);
1153        } else {
1154            num = bytes_remaining;
1155            ret = qemu_iovec_memset(qiov, bytes - bytes_remaining, 0,
1156                                    bytes_remaining);
1157        }
1158        if (ret < 0) {
1159            goto out;
1160        }
1161        bytes_remaining -= num;
1162    }
1163
1164out:
1165    return ret < 0 ? ret : 0;
1166}
1167
1168/*
1169 * Handle a read request in coroutine context
1170 */
1171int coroutine_fn bdrv_co_preadv(BdrvChild *child,
1172    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1173    BdrvRequestFlags flags)
1174{
1175    BlockDriverState *bs = child->bs;
1176    BlockDriver *drv = bs->drv;
1177    BdrvTrackedRequest req;
1178
1179    uint64_t align = bs->bl.request_alignment;
1180    uint8_t *head_buf = NULL;
1181    uint8_t *tail_buf = NULL;
1182    QEMUIOVector local_qiov;
1183    bool use_local_qiov = false;
1184    int ret;
1185
1186    trace_bdrv_co_preadv(child->bs, offset, bytes, flags);
1187
1188    if (!drv) {
1189        return -ENOMEDIUM;
1190    }
1191
1192    ret = bdrv_check_byte_request(bs, offset, bytes);
1193    if (ret < 0) {
1194        return ret;
1195    }
1196
1197    bdrv_inc_in_flight(bs);
1198
1199    /* Don't do copy-on-read if we read data before a write operation */
1200    if (atomic_read(&bs->copy_on_read) && !(flags & BDRV_REQ_NO_SERIALISING)) {
1201        flags |= BDRV_REQ_COPY_ON_READ;
1202    }
1203
1204    /* Align read if necessary by padding qiov */
1205    if (offset & (align - 1)) {
1206        head_buf = qemu_blockalign(bs, align);
1207        qemu_iovec_init(&local_qiov, qiov->niov + 2);
1208        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1209        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1210        use_local_qiov = true;
1211
1212        bytes += offset & (align - 1);
1213        offset = offset & ~(align - 1);
1214    }
1215
1216    if ((offset + bytes) & (align - 1)) {
1217        if (!use_local_qiov) {
1218            qemu_iovec_init(&local_qiov, qiov->niov + 1);
1219            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1220            use_local_qiov = true;
1221        }
1222        tail_buf = qemu_blockalign(bs, align);
1223        qemu_iovec_add(&local_qiov, tail_buf,
1224                       align - ((offset + bytes) & (align - 1)));
1225
1226        bytes = ROUND_UP(bytes, align);
1227    }
1228
1229    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_READ);
1230    ret = bdrv_aligned_preadv(child, &req, offset, bytes, align,
1231                              use_local_qiov ? &local_qiov : qiov,
1232                              flags);
1233    tracked_request_end(&req);
1234    bdrv_dec_in_flight(bs);
1235
1236    if (use_local_qiov) {
1237        qemu_iovec_destroy(&local_qiov);
1238        qemu_vfree(head_buf);
1239        qemu_vfree(tail_buf);
1240    }
1241
1242    return ret;
1243}
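
/*
 * Illustrative sketch (editorial example, not part of the original io.c): a
 * coroutine caller builds a QEMUIOVector and calls bdrv_co_preadv() directly;
 * the alignment handling above is transparent to it, so offset and size need
 * not be aligned.  "child", "buf" and the numbers are assumptions:
 *
 *     QEMUIOVector qiov;
 *     struct iovec iov = {
 *         .iov_base = buf,
 *         .iov_len  = 4096,
 *     };
 *     int ret;
 *
 *     qemu_iovec_init_external(&qiov, &iov, 1);
 *     ret = bdrv_co_preadv(child, 4096, 4096, &qiov, 0);
 */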
1244
1245static int coroutine_fn bdrv_co_do_readv(BdrvChild *child,
1246    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1247    BdrvRequestFlags flags)
1248{
1249    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1250        return -EINVAL;
1251    }
1252
1253    return bdrv_co_preadv(child, sector_num << BDRV_SECTOR_BITS,
1254                          nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1255}
1256
1257int coroutine_fn bdrv_co_readv(BdrvChild *child, int64_t sector_num,
1258                               int nb_sectors, QEMUIOVector *qiov)
1259{
1260    return bdrv_co_do_readv(child, sector_num, nb_sectors, qiov, 0);
1261}
1262
1263static int coroutine_fn bdrv_co_do_pwrite_zeroes(BlockDriverState *bs,
1264    int64_t offset, int bytes, BdrvRequestFlags flags)
1265{
1266    BlockDriver *drv = bs->drv;
1267    QEMUIOVector qiov;
1268    struct iovec iov = {0};
1269    int ret = 0;
1270    bool need_flush = false;
1271    int head = 0;
1272    int tail = 0;
1273
1274    int max_write_zeroes = MIN_NON_ZERO(bs->bl.max_pwrite_zeroes, INT_MAX);
1275    int alignment = MAX(bs->bl.pwrite_zeroes_alignment,
1276                        bs->bl.request_alignment);
1277    int max_transfer = MIN_NON_ZERO(bs->bl.max_transfer, MAX_BOUNCE_BUFFER);
1278
1279    assert(alignment % bs->bl.request_alignment == 0);
1280    head = offset % alignment;
1281    tail = (offset + bytes) % alignment;
1282    max_write_zeroes = QEMU_ALIGN_DOWN(max_write_zeroes, alignment);
1283    assert(max_write_zeroes >= bs->bl.request_alignment);
1284
1285    while (bytes > 0 && !ret) {
1286        int num = bytes;
1287
1288        /* Align request.  Block drivers can expect the "bulk" of the request
1289         * to be aligned, and that unaligned requests do not cross cluster
1290         * boundaries.
1291         */
1292        if (head) {
1293            /* Make a small request up to the first aligned sector. For
1294             * convenience, limit this request to max_transfer even if
1295             * we don't need to fall back to writes.  */
1296            num = MIN(MIN(bytes, max_transfer), alignment - head);
1297            head = (head + num) % alignment;
1298            assert(num < max_write_zeroes);
1299        } else if (tail && num > alignment) {
1300            /* Shorten the request to the last aligned sector.  */
1301            num -= tail;
1302        }
1303
1304        /* limit request size */
1305        if (num > max_write_zeroes) {
1306            num = max_write_zeroes;
1307        }
1308
1309        ret = -ENOTSUP;
1310        /* First try the efficient write zeroes operation */
1311        if (drv->bdrv_co_pwrite_zeroes) {
1312            ret = drv->bdrv_co_pwrite_zeroes(bs, offset, num,
1313                                             flags & bs->supported_zero_flags);
1314            if (ret != -ENOTSUP && (flags & BDRV_REQ_FUA) &&
1315                !(bs->supported_zero_flags & BDRV_REQ_FUA)) {
1316                need_flush = true;
1317            }
1318        } else {
1319            assert(!bs->supported_zero_flags);
1320        }
1321
1322        if (ret == -ENOTSUP) {
1323            /* Fall back to bounce buffer if write zeroes is unsupported */
1324            BdrvRequestFlags write_flags = flags & ~BDRV_REQ_ZERO_WRITE;
1325
1326            if ((flags & BDRV_REQ_FUA) &&
1327                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1328                /* No need for bdrv_driver_pwritev() to do a fallback
1329                 * flush on each chunk; use just one at the end */
1330                write_flags &= ~BDRV_REQ_FUA;
1331                need_flush = true;
1332            }
1333            num = MIN(num, max_transfer);
1334            iov.iov_len = num;
1335            if (iov.iov_base == NULL) {
1336                iov.iov_base = qemu_try_blockalign(bs, num);
1337                if (iov.iov_base == NULL) {
1338                    ret = -ENOMEM;
1339                    goto fail;
1340                }
1341                memset(iov.iov_base, 0, num);
1342            }
1343            qemu_iovec_init_external(&qiov, &iov, 1);
1344
1345            ret = bdrv_driver_pwritev(bs, offset, num, &qiov, write_flags);
1346
1347            /* Keep the bounce buffer around if it is big enough for all
1348             * future requests.
1349             */
1350            if (num < max_transfer) {
1351                qemu_vfree(iov.iov_base);
1352                iov.iov_base = NULL;
1353            }
1354        }
1355
1356        offset += num;
1357        bytes -= num;
1358    }
1359
1360fail:
1361    if (ret == 0 && need_flush) {
1362        ret = bdrv_co_flush(bs);
1363    }
1364    qemu_vfree(iov.iov_base);
1365    return ret;
1366}
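
/*
 * Editor's worked example (not part of the original io.c): with
 * alignment = 4096 and a sufficiently large max_transfer, a request of 10000
 * bytes at offset 4000 is split by the loop above into
 *
 *     [4000, 4096)       96 bytes   unaligned head
 *     [4096, 12288)    8192 bytes   aligned bulk
 *     [12288, 14000)   1712 bytes   unaligned tail
 *
 * so unaligned fragments never cross an alignment boundary and the bulk of
 * the request stays aligned, as the comment inside the loop promises to
 * block drivers.
 */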
1367
1368/*
1369 * Forwards an already correctly aligned write request to the BlockDriver,
1370 * after possibly fragmenting it.
1371 */
1372static int coroutine_fn bdrv_aligned_pwritev(BdrvChild *child,
1373    BdrvTrackedRequest *req, int64_t offset, unsigned int bytes,
1374    int64_t align, QEMUIOVector *qiov, int flags)
1375{
1376    BlockDriverState *bs = child->bs;
1377    BlockDriver *drv = bs->drv;
1378    bool waited;
1379    int ret;
1380
1381    int64_t start_sector = offset >> BDRV_SECTOR_BITS;
1382    int64_t end_sector = DIV_ROUND_UP(offset + bytes, BDRV_SECTOR_SIZE);
1383    uint64_t bytes_remaining = bytes;
1384    int max_transfer;
1385
1386    if (bdrv_has_readonly_bitmaps(bs)) {
1387        return -EPERM;
1388    }
1389
1390    assert(is_power_of_2(align));
1391    assert((offset & (align - 1)) == 0);
1392    assert((bytes & (align - 1)) == 0);
1393    assert(!qiov || bytes == qiov->size);
1394    assert((bs->open_flags & BDRV_O_NO_IO) == 0);
1395    assert(!(flags & ~BDRV_REQ_MASK));
1396    max_transfer = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_transfer, INT_MAX),
1397                                   align);
1398
1399    waited = wait_serialising_requests(req);
1400    assert(!waited || !req->serialising);
1401    assert(req->overlap_offset <= offset);
1402    assert(offset + bytes <= req->overlap_offset + req->overlap_bytes);
1403    assert(child->perm & BLK_PERM_WRITE);
1404    assert(end_sector <= bs->total_sectors || child->perm & BLK_PERM_RESIZE);
1405
1406    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, req);
1407
1408    if (!ret && bs->detect_zeroes != BLOCKDEV_DETECT_ZEROES_OPTIONS_OFF &&
1409        !(flags & BDRV_REQ_ZERO_WRITE) && drv->bdrv_co_pwrite_zeroes &&
1410        qemu_iovec_is_zero(qiov)) {
1411        flags |= BDRV_REQ_ZERO_WRITE;
1412        if (bs->detect_zeroes == BLOCKDEV_DETECT_ZEROES_OPTIONS_UNMAP) {
1413            flags |= BDRV_REQ_MAY_UNMAP;
1414        }
1415    }
1416
1417    if (ret < 0) {
1418        /* Do nothing, write notifier decided to fail this request */
1419    } else if (flags & BDRV_REQ_ZERO_WRITE) {
1420        bdrv_debug_event(bs, BLKDBG_PWRITEV_ZERO);
1421        ret = bdrv_co_do_pwrite_zeroes(bs, offset, bytes, flags);
1422    } else if (flags & BDRV_REQ_WRITE_COMPRESSED) {
1423        ret = bdrv_driver_pwritev_compressed(bs, offset, bytes, qiov);
1424    } else if (bytes <= max_transfer) {
1425        bdrv_debug_event(bs, BLKDBG_PWRITEV);
1426        ret = bdrv_driver_pwritev(bs, offset, bytes, qiov, flags);
1427    } else {
1428        bdrv_debug_event(bs, BLKDBG_PWRITEV);
1429        while (bytes_remaining) {
1430            int num = MIN(bytes_remaining, max_transfer);
1431            QEMUIOVector local_qiov;
1432            int local_flags = flags;
1433
1434            assert(num);
1435            if (num < bytes_remaining && (flags & BDRV_REQ_FUA) &&
1436                !(bs->supported_write_flags & BDRV_REQ_FUA)) {
1437                /* If FUA is going to be emulated by flush, we only
1438                 * need to flush on the last iteration */
1439                local_flags &= ~BDRV_REQ_FUA;
1440            }
1441            qemu_iovec_init(&local_qiov, qiov->niov);
1442            qemu_iovec_concat(&local_qiov, qiov, bytes - bytes_remaining, num);
1443
1444            ret = bdrv_driver_pwritev(bs, offset + bytes - bytes_remaining,
1445                                      num, &local_qiov, local_flags);
1446            qemu_iovec_destroy(&local_qiov);
1447            if (ret < 0) {
1448                break;
1449            }
1450            bytes_remaining -= num;
1451        }
1452    }
1453    bdrv_debug_event(bs, BLKDBG_PWRITEV_DONE);
1454
1455    atomic_inc(&bs->write_gen);
1456    bdrv_set_dirty(bs, start_sector, end_sector - start_sector);
1457
1458    stat64_max(&bs->wr_highest_offset, offset + bytes);
1459
1460    if (ret >= 0) {
1461        bs->total_sectors = MAX(bs->total_sectors, end_sector);
1462        ret = 0;
1463    }
1464
1465    return ret;
1466}
1467
1468static int coroutine_fn bdrv_co_do_zero_pwritev(BdrvChild *child,
1469                                                int64_t offset,
1470                                                unsigned int bytes,
1471                                                BdrvRequestFlags flags,
1472                                                BdrvTrackedRequest *req)
1473{
1474    BlockDriverState *bs = child->bs;
1475    uint8_t *buf = NULL;
1476    QEMUIOVector local_qiov;
1477    struct iovec iov;
1478    uint64_t align = bs->bl.request_alignment;
1479    unsigned int head_padding_bytes, tail_padding_bytes;
1480    int ret = 0;
1481
1482    head_padding_bytes = offset & (align - 1);
1483    tail_padding_bytes = (align - (offset + bytes)) & (align - 1);
1484
1485
1486    assert(flags & BDRV_REQ_ZERO_WRITE);
1487    if (head_padding_bytes || tail_padding_bytes) {
1488        buf = qemu_blockalign(bs, align);
1489        iov = (struct iovec) {
1490            .iov_base   = buf,
1491            .iov_len    = align,
1492        };
1493        qemu_iovec_init_external(&local_qiov, &iov, 1);
1494    }
1495    if (head_padding_bytes) {
1496        uint64_t zero_bytes = MIN(bytes, align - head_padding_bytes);
1497
1498        /* RMW the unaligned part before head. */
1499        mark_request_serialising(req, align);
1500        wait_serialising_requests(req);
1501        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1502        ret = bdrv_aligned_preadv(child, req, offset & ~(align - 1), align,
1503                                  align, &local_qiov, 0);
1504        if (ret < 0) {
1505            goto fail;
1506        }
1507        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1508
1509        memset(buf + head_padding_bytes, 0, zero_bytes);
1510        ret = bdrv_aligned_pwritev(child, req, offset & ~(align - 1), align,
1511                                   align, &local_qiov,
1512                                   flags & ~BDRV_REQ_ZERO_WRITE);
1513        if (ret < 0) {
1514            goto fail;
1515        }
1516        offset += zero_bytes;
1517        bytes -= zero_bytes;
1518    }
1519
1520    assert(!bytes || (offset & (align - 1)) == 0);
1521    if (bytes >= align) {
1522        /* Write the aligned part in the middle. */
1523        uint64_t aligned_bytes = bytes & ~(align - 1);
1524        ret = bdrv_aligned_pwritev(child, req, offset, aligned_bytes, align,
1525                                   NULL, flags);
1526        if (ret < 0) {
1527            goto fail;
1528        }
1529        bytes -= aligned_bytes;
1530        offset += aligned_bytes;
1531    }
1532
1533    assert(!bytes || (offset & (align - 1)) == 0);
1534    if (bytes) {
1535        assert(align == tail_padding_bytes + bytes);
1536        /* RMW the unaligned part after tail. */
1537        mark_request_serialising(req, align);
1538        wait_serialising_requests(req);
1539        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1540        ret = bdrv_aligned_preadv(child, req, offset, align,
1541                                  align, &local_qiov, 0);
1542        if (ret < 0) {
1543            goto fail;
1544        }
1545        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1546
1547        memset(buf, 0, bytes);
1548        ret = bdrv_aligned_pwritev(child, req, offset, align, align,
1549                                   &local_qiov, flags & ~BDRV_REQ_ZERO_WRITE);
1550    }
1551fail:
1552    qemu_vfree(buf);
1553    return ret;
1554
1555}
1556
1557/*
1558 * Handle a write request in coroutine context
1559 */
1560int coroutine_fn bdrv_co_pwritev(BdrvChild *child,
1561    int64_t offset, unsigned int bytes, QEMUIOVector *qiov,
1562    BdrvRequestFlags flags)
1563{
1564    BlockDriverState *bs = child->bs;
1565    BdrvTrackedRequest req;
1566    uint64_t align = bs->bl.request_alignment;
1567    uint8_t *head_buf = NULL;
1568    uint8_t *tail_buf = NULL;
1569    QEMUIOVector local_qiov;
1570    bool use_local_qiov = false;
1571    int ret;
1572
1573    trace_bdrv_co_pwritev(child->bs, offset, bytes, flags);
1574
1575    if (!bs->drv) {
1576        return -ENOMEDIUM;
1577    }
1578    if (bs->read_only) {
1579        return -EPERM;
1580    }
1581    assert(!(bs->open_flags & BDRV_O_INACTIVE));
1582
1583    ret = bdrv_check_byte_request(bs, offset, bytes);
1584    if (ret < 0) {
1585        return ret;
1586    }
1587
1588    bdrv_inc_in_flight(bs);
1589    /*
1590     * Align write if necessary by performing a read-modify-write cycle.
1591     * Pad qiov with the read parts and be sure to have a tracked request not
1592     * only for bdrv_aligned_pwritev, but also for the reads of the RMW cycle.
1593     */
1594    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_WRITE);
1595
1596    if (!qiov) {
1597        ret = bdrv_co_do_zero_pwritev(child, offset, bytes, flags, &req);
1598        goto out;
1599    }
1600
1601    if (offset & (align - 1)) {
1602        QEMUIOVector head_qiov;
1603        struct iovec head_iov;
1604
1605        mark_request_serialising(&req, align);
1606        wait_serialising_requests(&req);
1607
1608        head_buf = qemu_blockalign(bs, align);
1609        head_iov = (struct iovec) {
1610            .iov_base   = head_buf,
1611            .iov_len    = align,
1612        };
1613        qemu_iovec_init_external(&head_qiov, &head_iov, 1);
1614
1615        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_HEAD);
1616        ret = bdrv_aligned_preadv(child, &req, offset & ~(align - 1), align,
1617                                  align, &head_qiov, 0);
1618        if (ret < 0) {
1619            goto fail;
1620        }
1621        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_HEAD);
1622
1623        qemu_iovec_init(&local_qiov, qiov->niov + 2);
1624        qemu_iovec_add(&local_qiov, head_buf, offset & (align - 1));
1625        qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1626        use_local_qiov = true;
1627
1628        bytes += offset & (align - 1);
1629        offset = offset & ~(align - 1);
1630
1631        /* We have read the tail already if the request is smaller
1632         * than one aligned block.
1633         */
1634        if (bytes < align) {
1635            qemu_iovec_add(&local_qiov, head_buf + bytes, align - bytes);
1636            bytes = align;
1637        }
1638    }
1639
1640    if ((offset + bytes) & (align - 1)) {
1641        QEMUIOVector tail_qiov;
1642        struct iovec tail_iov;
1643        size_t tail_bytes;
1644        bool waited;
1645
1646        mark_request_serialising(&req, align);
1647        waited = wait_serialising_requests(&req);
1648        assert(!waited || !use_local_qiov);
1649
1650        tail_buf = qemu_blockalign(bs, align);
1651        tail_iov = (struct iovec) {
1652            .iov_base   = tail_buf,
1653            .iov_len    = align,
1654        };
1655        qemu_iovec_init_external(&tail_qiov, &tail_iov, 1);
1656
1657        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_TAIL);
1658        ret = bdrv_aligned_preadv(child, &req, (offset + bytes) & ~(align - 1),
1659                                  align, align, &tail_qiov, 0);
1660        if (ret < 0) {
1661            goto fail;
1662        }
1663        bdrv_debug_event(bs, BLKDBG_PWRITEV_RMW_AFTER_TAIL);
1664
1665        if (!use_local_qiov) {
1666            qemu_iovec_init(&local_qiov, qiov->niov + 1);
1667            qemu_iovec_concat(&local_qiov, qiov, 0, qiov->size);
1668            use_local_qiov = true;
1669        }
1670
1671        tail_bytes = (offset + bytes) & (align - 1);
1672        qemu_iovec_add(&local_qiov, tail_buf + tail_bytes, align - tail_bytes);
1673
1674        bytes = ROUND_UP(bytes, align);
1675    }
1676
1677    ret = bdrv_aligned_pwritev(child, &req, offset, bytes, align,
1678                               use_local_qiov ? &local_qiov : qiov,
1679                               flags);
1680
1681fail:
1682
1683    if (use_local_qiov) {
1684        qemu_iovec_destroy(&local_qiov);
1685    }
1686    qemu_vfree(head_buf);
1687    qemu_vfree(tail_buf);
1688out:
1689    tracked_request_end(&req);
1690    bdrv_dec_in_flight(bs);
1691    return ret;
1692}
1693
1694static int coroutine_fn bdrv_co_do_writev(BdrvChild *child,
1695    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov,
1696    BdrvRequestFlags flags)
1697{
1698    if (nb_sectors < 0 || nb_sectors > BDRV_REQUEST_MAX_SECTORS) {
1699        return -EINVAL;
1700    }
1701
1702    return bdrv_co_pwritev(child, sector_num << BDRV_SECTOR_BITS,
1703                           nb_sectors << BDRV_SECTOR_BITS, qiov, flags);
1704}
1705
1706int coroutine_fn bdrv_co_writev(BdrvChild *child, int64_t sector_num,
1707    int nb_sectors, QEMUIOVector *qiov)
1708{
1709    return bdrv_co_do_writev(child, sector_num, nb_sectors, qiov, 0);
1710}
1711
1712int coroutine_fn bdrv_co_pwrite_zeroes(BdrvChild *child, int64_t offset,
1713                                       int bytes, BdrvRequestFlags flags)
1714{
1715    trace_bdrv_co_pwrite_zeroes(child->bs, offset, bytes, flags);
1716
1717    if (!(child->bs->open_flags & BDRV_O_UNMAP)) {
1718        flags &= ~BDRV_REQ_MAY_UNMAP;
1719    }
1720
1721    return bdrv_co_pwritev(child, offset, bytes, NULL,
1722                           BDRV_REQ_ZERO_WRITE | flags);
1723}
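
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * coroutine caller that punches a zeroed region into a child node.  The
 * name example_zero_region and its parameters are assumptions made for this
 * example only.
 */
static int coroutine_fn example_zero_region(BdrvChild *child, int64_t offset,
                                            int bytes)
{
    /* BDRV_REQ_MAY_UNMAP is only a hint; bdrv_co_pwrite_zeroes() strips it
     * again unless the node was opened with BDRV_O_UNMAP. */
    return bdrv_co_pwrite_zeroes(child, offset, bytes, BDRV_REQ_MAY_UNMAP);
}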
1724
1725/*
1726 * Flush ALL BDSes regardless of whether they are reachable via a BlockBackend.
1727 */
1728int bdrv_flush_all(void)
1729{
1730    BdrvNextIterator it;
1731    BlockDriverState *bs = NULL;
1732    int result = 0;
1733
1734    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
1735        AioContext *aio_context = bdrv_get_aio_context(bs);
1736        int ret;
1737
1738        aio_context_acquire(aio_context);
1739        ret = bdrv_flush(bs);
1740        if (ret < 0 && !result) {
1741            result = ret;
1742        }
1743        aio_context_release(aio_context);
1744    }
1745
1746    return result;
1747}
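
/*
 * Illustrative sketch (not part of the original file): the same
 * iterate-and-lock pattern that bdrv_flush_all() uses above, here merely
 * counting nodes.  The name example_count_all_nodes is an assumption made
 * for this example only.
 */
static int example_count_all_nodes(void)
{
    BdrvNextIterator it;
    BlockDriverState *bs;
    int count = 0;

    for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
        AioContext *aio_context = bdrv_get_aio_context(bs);

        /* Per-node state must only be touched under its AioContext lock */
        aio_context_acquire(aio_context);
        count++;
        aio_context_release(aio_context);
    }

    return count;
}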
1748
1749
1750typedef struct BdrvCoGetBlockStatusData {
1751    BlockDriverState *bs;
1752    BlockDriverState *base;
1753    BlockDriverState **file;
1754    int64_t sector_num;
1755    int nb_sectors;
1756    int *pnum;
1757    int64_t ret;
1758    bool done;
1759} BdrvCoGetBlockStatusData;
1760
1761/*
1762 * Returns the allocation status of the specified sectors.
1763 * Drivers not implementing the functionality are assumed to not support
1764 * backing files, hence all their sectors are reported as allocated.
1765 *
1766 * If 'sector_num' is beyond the end of the disk image the return value is
1767 * BDRV_BLOCK_EOF and 'pnum' is set to 0.
1768 *
1769 * 'pnum' is set to the number of sectors (including and immediately following
1770 * the specified sector) that are known to be in the same
1771 * allocated/unallocated state.
1772 *
1773 * 'nb_sectors' is the max value 'pnum' should be set to.  If nb_sectors goes
1774 * beyond the end of the disk image it will be clamped; if 'pnum' is set to
1775 * the end of the image, then the returned value will include BDRV_BLOCK_EOF.
1776 *
1777 * If the returned value is positive and BDRV_BLOCK_OFFSET_VALID is set, then
1778 * 'file' points to the BDS in which the sector range is allocated.
1779 */
1780static int64_t coroutine_fn bdrv_co_get_block_status(BlockDriverState *bs,
1781                                                     int64_t sector_num,
1782                                                     int nb_sectors, int *pnum,
1783                                                     BlockDriverState **file)
1784{
1785    int64_t total_sectors;
1786    int64_t n;
1787    int64_t ret, ret2;
1788
1789    *file = NULL;
1790    total_sectors = bdrv_nb_sectors(bs);
1791    if (total_sectors < 0) {
1792        return total_sectors;
1793    }
1794
1795    if (sector_num >= total_sectors) {
1796        *pnum = 0;
1797        return BDRV_BLOCK_EOF;
1798    }
1799
1800    n = total_sectors - sector_num;
1801    if (n < nb_sectors) {
1802        nb_sectors = n;
1803    }
1804
1805    if (!bs->drv->bdrv_co_get_block_status) {
1806        *pnum = nb_sectors;
1807        ret = BDRV_BLOCK_DATA | BDRV_BLOCK_ALLOCATED;
1808        if (sector_num + nb_sectors == total_sectors) {
1809            ret |= BDRV_BLOCK_EOF;
1810        }
1811        if (bs->drv->protocol_name) {
1812            ret |= BDRV_BLOCK_OFFSET_VALID | (sector_num * BDRV_SECTOR_SIZE);
1813            *file = bs;
1814        }
1815        return ret;
1816    }
1817
1818    bdrv_inc_in_flight(bs);
1819    ret = bs->drv->bdrv_co_get_block_status(bs, sector_num, nb_sectors, pnum,
1820                                            file);
1821    if (ret < 0) {
1822        *pnum = 0;
1823        goto out;
1824    }
1825
1826    if (ret & BDRV_BLOCK_RAW) {
1827        assert(ret & BDRV_BLOCK_OFFSET_VALID && *file);
1828        ret = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1829                                       *pnum, pnum, file);
1830        goto out;
1831    }
1832
1833    if (ret & (BDRV_BLOCK_DATA | BDRV_BLOCK_ZERO)) {
1834        ret |= BDRV_BLOCK_ALLOCATED;
1835    } else {
1836        if (bdrv_unallocated_blocks_are_zero(bs)) {
1837            ret |= BDRV_BLOCK_ZERO;
1838        } else if (bs->backing) {
1839            BlockDriverState *bs2 = bs->backing->bs;
1840            int64_t nb_sectors2 = bdrv_nb_sectors(bs2);
1841            if (nb_sectors2 >= 0 && sector_num >= nb_sectors2) {
1842                ret |= BDRV_BLOCK_ZERO;
1843            }
1844        }
1845    }
1846
1847    if (*file && *file != bs &&
1848        (ret & BDRV_BLOCK_DATA) && !(ret & BDRV_BLOCK_ZERO) &&
1849        (ret & BDRV_BLOCK_OFFSET_VALID)) {
1850        BlockDriverState *file2;
1851        int file_pnum;
1852
1853        ret2 = bdrv_co_get_block_status(*file, ret >> BDRV_SECTOR_BITS,
1854                                        *pnum, &file_pnum, &file2);
1855        if (ret2 >= 0) {
1856             /* Ignore errors.  This is just providing extra information; it
1857             * is useful but not necessary.
1858             */
1859            if (ret2 & BDRV_BLOCK_EOF &&
1860                (!file_pnum || ret2 & BDRV_BLOCK_ZERO)) {
1861                /*
1862                 * It is valid for the format block driver to read
1863                 * beyond the end of the underlying file's current
1864                 * size; such areas read as zero.
1865                 */
1866                ret |= BDRV_BLOCK_ZERO;
1867            } else {
1868                /* Limit request to the range reported by the protocol driver */
1869                *pnum = file_pnum;
1870                ret |= (ret2 & BDRV_BLOCK_ZERO);
1871            }
1872        }
1873    }
1874
1875out:
1876    bdrv_dec_in_flight(bs);
1877    if (ret >= 0 && sector_num + *pnum == total_sectors) {
1878        ret |= BDRV_BLOCK_EOF;
1879    }
1880    return ret;
1881}
1882
1883static int64_t coroutine_fn bdrv_co_get_block_status_above(BlockDriverState *bs,
1884        BlockDriverState *base,
1885        int64_t sector_num,
1886        int nb_sectors,
1887        int *pnum,
1888        BlockDriverState **file)
1889{
1890    BlockDriverState *p;
1891    int64_t ret = 0;
1892    bool first = true;
1893
1894    assert(bs != base);
1895    for (p = bs; p != base; p = backing_bs(p)) {
1896        ret = bdrv_co_get_block_status(p, sector_num, nb_sectors, pnum, file);
1897        if (ret < 0) {
1898            break;
1899        }
1900        if (ret & BDRV_BLOCK_ZERO && ret & BDRV_BLOCK_EOF && !first) {
1901            /*
1902             * Reading beyond the end of the file continues to read
1903             * zeroes, but we can only widen the result to the
1904             * unallocated length we learned from an earlier
1905             * iteration.
1906             */
1907            *pnum = nb_sectors;
1908        }
1909        if (ret & (BDRV_BLOCK_ZERO | BDRV_BLOCK_DATA)) {
1910            break;
1911        }
1912        /* [sector_num, sector_num + pnum) is unallocated on this layer,
1913         * which could be only the first part of the requested range.  */
1914        nb_sectors = MIN(nb_sectors, *pnum);
1915        first = false;
1916    }
1917    return ret;
1918}
1919
1920/* Coroutine wrapper for bdrv_get_block_status_above() */
1921static void coroutine_fn bdrv_get_block_status_above_co_entry(void *opaque)
1922{
1923    BdrvCoGetBlockStatusData *data = opaque;
1924
1925    data->ret = bdrv_co_get_block_status_above(data->bs, data->base,
1926                                               data->sector_num,
1927                                               data->nb_sectors,
1928                                               data->pnum,
1929                                               data->file);
1930    data->done = true;
1931}
1932
1933/*
1934 * Synchronous wrapper around bdrv_co_get_block_status_above().
1935 *
1936 * See bdrv_co_get_block_status_above() for details.
1937 */
1938int64_t bdrv_get_block_status_above(BlockDriverState *bs,
1939                                    BlockDriverState *base,
1940                                    int64_t sector_num,
1941                                    int nb_sectors, int *pnum,
1942                                    BlockDriverState **file)
1943{
1944    Coroutine *co;
1945    BdrvCoGetBlockStatusData data = {
1946        .bs = bs,
1947        .base = base,
1948        .file = file,
1949        .sector_num = sector_num,
1950        .nb_sectors = nb_sectors,
1951        .pnum = pnum,
1952        .done = false,
1953    };
1954
1955    if (qemu_in_coroutine()) {
1956        /* Fast-path if already in coroutine context */
1957        bdrv_get_block_status_above_co_entry(&data);
1958    } else {
1959        co = qemu_coroutine_create(bdrv_get_block_status_above_co_entry,
1960                                   &data);
1961        bdrv_coroutine_enter(bs, co);
1962        BDRV_POLL_WHILE(bs, !data.done);
1963    }
1964    return data.ret;
1965}
1966
1967int64_t bdrv_get_block_status(BlockDriverState *bs,
1968                              int64_t sector_num,
1969                              int nb_sectors, int *pnum,
1970                              BlockDriverState **file)
1971{
1972    return bdrv_get_block_status_above(bs, backing_bs(bs),
1973                                       sector_num, nb_sectors, pnum, file);
1974}
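
/*
 * Illustrative sketch (not part of the original file): how a hypothetical
 * caller might decode the status bits documented above
 * bdrv_co_get_block_status().  The name example_dump_status and the use of
 * fprintf() for output are assumptions made for this example only.
 */
static void example_dump_status(BlockDriverState *bs, int64_t sector_num)
{
    BlockDriverState *file;
    int pnum;
    int64_t ret = bdrv_get_block_status(bs, sector_num, 1, &pnum, &file);

    if (ret < 0) {
        fprintf(stderr, "status query failed: %" PRId64 "\n", ret);
        return;
    }
    /* pnum is a sector count covering at most the queried range */
    fprintf(stderr, "%d sectors: data=%d zero=%d allocated=%d eof=%d\n",
            pnum, !!(ret & BDRV_BLOCK_DATA), !!(ret & BDRV_BLOCK_ZERO),
            !!(ret & BDRV_BLOCK_ALLOCATED), !!(ret & BDRV_BLOCK_EOF));
    if ((ret & BDRV_BLOCK_OFFSET_VALID) && file) {
        /* The low bits of ret carry the host offset within 'file' */
        fprintf(stderr, "mapped at %" PRId64 " in %s\n",
                ret & BDRV_BLOCK_OFFSET_MASK, file->filename);
    }
}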
1975
1976int coroutine_fn bdrv_is_allocated(BlockDriverState *bs, int64_t offset,
1977                                   int64_t bytes, int64_t *pnum)
1978{
1979    BlockDriverState *file;
1980    int64_t sector_num = offset >> BDRV_SECTOR_BITS;
1981    int nb_sectors = bytes >> BDRV_SECTOR_BITS;
1982    int64_t ret;
1983    int psectors;
1984
1985    assert(QEMU_IS_ALIGNED(offset, BDRV_SECTOR_SIZE));
1986    assert(QEMU_IS_ALIGNED(bytes, BDRV_SECTOR_SIZE) && bytes < INT_MAX);
1987    ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &psectors,
1988                                &file);
1989    if (ret < 0) {
1990        return ret;
1991    }
1992    if (pnum) {
1993        *pnum = psectors * BDRV_SECTOR_SIZE;
1994    }
1995    return !!(ret & BDRV_BLOCK_ALLOCATED);
1996}
1997
1998/*
1999 * Given an image chain: ... -> [BASE] -> [INTER1] -> [INTER2] -> [TOP]
2000 *
2001 * Return true if (a prefix of) the given range is allocated in any image
2002 * between BASE and TOP (inclusive).  BASE can be NULL to check if the given
2003 * offset is allocated in any image of the chain.  Return false otherwise,
2004 * or negative errno on failure.
2005 *
2006 * 'pnum' is set to the number of bytes (including and immediately
2007 * following the specified offset) that are known to be in the same
2008 * allocated/unallocated state.  Note that a subsequent call starting
2009 * at 'offset + *pnum' may return the same allocation status (in other
2010 * words, the result is not necessarily the maximum possible range);
2011 * but 'pnum' will only be 0 when end of file is reached.
2012 *
2013 */
2014int bdrv_is_allocated_above(BlockDriverState *top,
2015                            BlockDriverState *base,
2016                            int64_t offset, int64_t bytes, int64_t *pnum)
2017{
2018    BlockDriverState *intermediate;
2019    int ret;
2020    int64_t n = bytes;
2021
2022    intermediate = top;
2023    while (intermediate && intermediate != base) {
2024        int64_t pnum_inter;
2025        int64_t size_inter;
2026
2027        ret = bdrv_is_allocated(intermediate, offset, bytes, &pnum_inter);
2028        if (ret < 0) {
2029            return ret;
2030        }
2031        if (ret) {
2032            *pnum = pnum_inter;
2033            return 1;
2034        }
2035
2036        size_inter = bdrv_getlength(intermediate);
2037        if (size_inter < 0) {
2038            return size_inter;
2039        }
2040        if (n > pnum_inter &&
2041            (intermediate == top || offset + pnum_inter < size_inter)) {
2042            n = pnum_inter;
2043        }
2044
2045        intermediate = backing_bs(intermediate);
2046    }
2047
2048    *pnum = n;
2049    return 0;
2050}
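
/*
 * Illustrative sketch (not part of the original file): walking a byte range
 * with bdrv_is_allocated_above(), honouring the caveat above that 'pnum' is
 * not necessarily maximal.  The name example_range_needs_copy is an
 * assumption made for this example only.
 */
static int example_range_needs_copy(BlockDriverState *top,
                                    BlockDriverState *base,
                                    int64_t offset, int64_t bytes)
{
    while (bytes > 0) {
        int64_t pnum;
        int ret = bdrv_is_allocated_above(top, base, offset, bytes, &pnum);

        if (ret != 0) {
            /* ret < 0: error; ret == 1: a pnum-byte prefix is allocated */
            return ret;
        }
        if (pnum == 0) {
            /* end of file: the remainder of the range is not allocated */
            break;
        }
        /* the first pnum bytes are unallocated everywhere above base */
        offset += pnum;
        bytes -= pnum;
    }

    return 0;
}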
2051
2052typedef struct BdrvVmstateCo {
2053    BlockDriverState    *bs;
2054    QEMUIOVector        *qiov;
2055    int64_t             pos;
2056    bool                is_read;
2057    int                 ret;
2058} BdrvVmstateCo;
2059
2060static int coroutine_fn
2061bdrv_co_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2062                   bool is_read)
2063{
2064    BlockDriver *drv = bs->drv;
2065    int ret = -ENOTSUP;
2066
2067    bdrv_inc_in_flight(bs);
2068
2069    if (!drv) {
2070        ret = -ENOMEDIUM;
2071    } else if (drv->bdrv_load_vmstate) {
2072        if (is_read) {
2073            ret = drv->bdrv_load_vmstate(bs, qiov, pos);
2074        } else {
2075            ret = drv->bdrv_save_vmstate(bs, qiov, pos);
2076        }
2077    } else if (bs->file) {
2078        ret = bdrv_co_rw_vmstate(bs->file->bs, qiov, pos, is_read);
2079    }
2080
2081    bdrv_dec_in_flight(bs);
2082    return ret;
2083}
2084
2085static void coroutine_fn bdrv_co_rw_vmstate_entry(void *opaque)
2086{
2087    BdrvVmstateCo *co = opaque;
2088    co->ret = bdrv_co_rw_vmstate(co->bs, co->qiov, co->pos, co->is_read);
2089}
2090
2091static inline int
2092bdrv_rw_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos,
2093                bool is_read)
2094{
2095    if (qemu_in_coroutine()) {
2096        return bdrv_co_rw_vmstate(bs, qiov, pos, is_read);
2097    } else {
2098        BdrvVmstateCo data = {
2099            .bs         = bs,
2100            .qiov       = qiov,
2101            .pos        = pos,
2102            .is_read    = is_read,
2103            .ret        = -EINPROGRESS,
2104        };
2105        Coroutine *co = qemu_coroutine_create(bdrv_co_rw_vmstate_entry, &data);
2106
2107        bdrv_coroutine_enter(bs, co);
2108        BDRV_POLL_WHILE(bs, data.ret == -EINPROGRESS);
2109        return data.ret;
2110    }
2111}
2112
2113int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
2114                      int64_t pos, int size)
2115{
2116    QEMUIOVector qiov;
2117    struct iovec iov = {
2118        .iov_base   = (void *) buf,
2119        .iov_len    = size,
2120    };
2121    int ret;
2122
2123    qemu_iovec_init_external(&qiov, &iov, 1);
2124
2125    ret = bdrv_writev_vmstate(bs, &qiov, pos);
2126    if (ret < 0) {
2127        return ret;
2128    }
2129
2130    return size;
2131}
2132
2133int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2134{
2135    return bdrv_rw_vmstate(bs, qiov, pos, false);
2136}
2137
2138int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
2139                      int64_t pos, int size)
2140{
2141    QEMUIOVector qiov;
2142    struct iovec iov = {
2143        .iov_base   = buf,
2144        .iov_len    = size,
2145    };
2146    int ret;
2147
2148    qemu_iovec_init_external(&qiov, &iov, 1);
2149    ret = bdrv_readv_vmstate(bs, &qiov, pos);
2150    if (ret < 0) {
2151        return ret;
2152    }
2153
2154    return size;
2155}
2156
2157int bdrv_readv_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos)
2158{
2159    return bdrv_rw_vmstate(bs, qiov, pos, true);
2160}
2161
2162/**************************************************************/
2163/* async I/Os */
2164
2165void bdrv_aio_cancel(BlockAIOCB *acb)
2166{
2167    qemu_aio_ref(acb);
2168    bdrv_aio_cancel_async(acb);
2169    while (acb->refcnt > 1) {
2170        if (acb->aiocb_info->get_aio_context) {
2171            aio_poll(acb->aiocb_info->get_aio_context(acb), true);
2172        } else if (acb->bs) {
2173            /* qemu_aio_ref and qemu_aio_unref are not thread-safe, so
2174             * assert that we're not using an I/O thread.  Thread-safe
2175             * code should use bdrv_aio_cancel_async exclusively.
2176             */
2177            assert(bdrv_get_aio_context(acb->bs) == qemu_get_aio_context());
2178            aio_poll(bdrv_get_aio_context(acb->bs), true);
2179        } else {
2180            abort();
2181        }
2182    }
2183    qemu_aio_unref(acb);
2184}
2185
2186/* Async version of aio cancel. The caller is not blocked if the acb implements
2187 * cancel_async; otherwise we do nothing and let the request complete normally.
2188 * In either case the completion callback must be called. */
2189void bdrv_aio_cancel_async(BlockAIOCB *acb)
2190{
2191    if (acb->aiocb_info->cancel_async) {
2192        acb->aiocb_info->cancel_async(acb);
2193    }
2194}
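
/*
 * Illustrative sketch (not part of the original file): cancelling from code
 * that must not block.  'acb' is assumed to come from an earlier asynchronous
 * submission; the name example_cancel_request is an assumption made for this
 * example only.
 */
static void example_cancel_request(BlockAIOCB *acb)
{
    /* Never blocks; the completion callback still runs exactly once with
     * the request's final return value. */
    bdrv_aio_cancel_async(acb);
}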
2195
2196/**************************************************************/
2197/* Coroutine block device emulation */
2198
2199typedef struct FlushCo {
2200    BlockDriverState *bs;
2201    int ret;
2202} FlushCo;
2203
2204
2205static void coroutine_fn bdrv_flush_co_entry(void *opaque)
2206{
2207    FlushCo *rwco = opaque;
2208
2209    rwco->ret = bdrv_co_flush(rwco->bs);
2210}
2211
2212int coroutine_fn bdrv_co_flush(BlockDriverState *bs)
2213{
2214    int current_gen;
2215    int ret = 0;
2216
2217    bdrv_inc_in_flight(bs);
2218
2219    if (!bdrv_is_inserted(bs) || bdrv_is_read_only(bs) ||
2220        bdrv_is_sg(bs)) {
2221        goto early_exit;
2222    }
2223
2224    qemu_co_mutex_lock(&bs->reqs_lock);
2225    current_gen = atomic_read(&bs->write_gen);
2226
2227    /* Wait until any previous flushes are completed */
2228    while (bs->active_flush_req) {
2229        qemu_co_queue_wait(&bs->flush_queue, &bs->reqs_lock);
2230    }
2231
2232    /* Flushes reach this point in nondecreasing current_gen order.  */
2233    bs->active_flush_req = true;
2234    qemu_co_mutex_unlock(&bs->reqs_lock);
2235
2236    /* Write back all layers by calling one driver function */
2237    if (bs->drv->bdrv_co_flush) {
2238        ret = bs->drv->bdrv_co_flush(bs);
2239        goto out;
2240    }
2241
2242    /* Write back cached data to the OS even with cache=unsafe */
2243    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_OS);
2244    if (bs->drv->bdrv_co_flush_to_os) {
2245        ret = bs->drv->bdrv_co_flush_to_os(bs);
2246        if (ret < 0) {
2247            goto out;
2248        }
2249    }
2250
2251    /* But don't actually force it to the disk with cache=unsafe */
2252    if (bs->open_flags & BDRV_O_NO_FLUSH) {
2253        goto flush_parent;
2254    }
2255
2256    /* Check if we really need to flush anything */
2257    if (bs->flushed_gen == current_gen) {
2258        goto flush_parent;
2259    }
2260
2261    BLKDBG_EVENT(bs->file, BLKDBG_FLUSH_TO_DISK);
2262    if (bs->drv->bdrv_co_flush_to_disk) {
2263        ret = bs->drv->bdrv_co_flush_to_disk(bs);
2264    } else if (bs->drv->bdrv_aio_flush) {
2265        BlockAIOCB *acb;
2266        CoroutineIOCompletion co = {
2267            .coroutine = qemu_coroutine_self(),
2268        };
2269
2270        acb = bs->drv->bdrv_aio_flush(bs, bdrv_co_io_em_complete, &co);
2271        if (acb == NULL) {
2272            ret = -EIO;
2273        } else {
2274            qemu_coroutine_yield();
2275            ret = co.ret;
2276        }
2277    } else {
2278        /*
2279         * Some block drivers always operate in either writethrough or unsafe
2280         * mode and therefore don't support bdrv_flush. Usually qemu doesn't
2281         * know how the server works (because the behaviour is hardcoded or
2282         * depends on server-side configuration), so we can't ensure that
2283         * everything is safe on disk. Returning an error doesn't work because
2284         * that would break guests even if the server operates in writethrough
2285         * mode.
2286         *
2287         * Let's hope the user knows what they're doing.
2288         */
2289        ret = 0;
2290    }
2291
2292    if (ret < 0) {
2293        goto out;
2294    }
2295
2296    /* Now flush the underlying protocol.  It will also have BDRV_O_NO_FLUSH
2297     * in the case of cache=unsafe, so there are no useless flushes.
2298     */
2299flush_parent:
2300    ret = bs->file ? bdrv_co_flush(bs->file->bs) : 0;
2301out:
2302    /* Notify any pending flushes that we have completed */
2303    if (ret == 0) {
2304        bs->flushed_gen = current_gen;
2305    }
2306
2307    qemu_co_mutex_lock(&bs->reqs_lock);
2308    bs->active_flush_req = false;
2309    /* Return value is ignored - it's ok if wait queue is empty */
2310    qemu_co_queue_next(&bs->flush_queue);
2311    qemu_co_mutex_unlock(&bs->reqs_lock);
2312
2313early_exit:
2314    bdrv_dec_in_flight(bs);
2315    return ret;
2316}
2317
2318int bdrv_flush(BlockDriverState *bs)
2319{
2320    Coroutine *co;
2321    FlushCo flush_co = {
2322        .bs = bs,
2323        .ret = NOT_DONE,
2324    };
2325
2326    if (qemu_in_coroutine()) {
2327        /* Fast-path if already in coroutine context */
2328        bdrv_flush_co_entry(&flush_co);
2329    } else {
2330        co = qemu_coroutine_create(bdrv_flush_co_entry, &flush_co);
2331        bdrv_coroutine_enter(bs, co);
2332        BDRV_POLL_WHILE(bs, flush_co.ret == NOT_DONE);
2333    }
2334
2335    return flush_co.ret;
2336}
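
/*
 * Illustrative sketch (not part of the original file): a hypothetical
 * coroutine that makes a buffer durable by writing it through a child node
 * and then flushing that node.  The name example_durable_write and its
 * parameters are assumptions made for this example only.
 */
static int coroutine_fn example_durable_write(BdrvChild *child, void *buf,
                                              int64_t offset,
                                              unsigned int bytes)
{
    QEMUIOVector qiov;
    struct iovec iov = {
        .iov_base   = buf,
        .iov_len    = bytes,
    };
    int ret;

    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_co_pwritev(child, offset, bytes, &qiov, 0);
    if (ret < 0) {
        return ret;
    }

    /* A no-op if the current write generation has already been flushed */
    return bdrv_co_flush(child->bs);
}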
2337
2338typedef struct DiscardCo {
2339    BlockDriverState *bs;
2340    int64_t offset;
2341    int bytes;
2342    int ret;
2343} DiscardCo;
2344static void coroutine_fn bdrv_pdiscard_co_entry(void *opaque)
2345{
2346    DiscardCo *rwco = opaque;
2347
2348    rwco->ret = bdrv_co_pdiscard(rwco->bs, rwco->offset, rwco->bytes);
2349}
2350
2351int coroutine_fn bdrv_co_pdiscard(BlockDriverState *bs, int64_t offset,
2352                                  int bytes)
2353{
2354    BdrvTrackedRequest req;
2355    int max_pdiscard, ret;
2356    int head, tail, align;
2357
2358    if (!bs->drv) {
2359        return -ENOMEDIUM;
2360    }
2361
2362    if (bdrv_has_readonly_bitmaps(bs)) {
2363        return -EPERM;
2364    }
2365
2366    ret = bdrv_check_byte_request(bs, offset, bytes);
2367    if (ret < 0) {
2368        return ret;
2369    } else if (bs->read_only) {
2370        return -EPERM;
2371    }
2372    assert(!(bs->open_flags & BDRV_O_INACTIVE));
2373
2374    /* Do nothing if disabled.  */
2375    if (!(bs->open_flags & BDRV_O_UNMAP)) {
2376        return 0;
2377    }
2378
2379    if (!bs->drv->bdrv_co_pdiscard && !bs->drv->bdrv_aio_pdiscard) {
2380        return 0;
2381    }
2382
2383    /* Discard is advisory, but some devices track and coalesce
2384     * unaligned requests, so we must pass everything down rather than
2385     * round here.  Still, most devices will just silently ignore
2386     * unaligned requests (by returning -ENOTSUP), so we must fragment
2387     * the request accordingly.  */
2388    align = MAX(bs->bl.pdiscard_alignment, bs->bl.request_alignment);
2389    assert(align % bs->bl.request_alignment == 0);
2390    head = offset % align;
2391    tail = (offset + bytes) % align;
2392
2393    bdrv_inc_in_flight(bs);
2394    tracked_request_begin(&req, bs, offset, bytes, BDRV_TRACKED_DISCARD);
2395
2396    ret = notifier_with_return_list_notify(&bs->before_write_notifiers, &req);
2397    if (ret < 0) {
2398        goto out;
2399    }
2400
2401    max_pdiscard = QEMU_ALIGN_DOWN(MIN_NON_ZERO(bs->bl.max_pdiscard, INT_MAX),
2402                                   align);
2403    assert(max_pdiscard >= bs->bl.request_alignment);
2404
2405    while (bytes > 0) {
2406        int num = bytes;
2407
2408        if (head) {
2409            /* Make small requests to get to alignment boundaries. */
2410            num = MIN(bytes, align - head);
2411            if (!QEMU_IS_ALIGNED(num, bs->bl.request_alignment)) {
2412                num %= bs->bl.request_alignment;
2413            }
2414            head = (head + num) % align;
2415            assert(num < max_pdiscard);
2416        } else if (tail) {
2417            if (num > align) {
2418                /* Shorten the request to the last aligned cluster.  */
2419                num -= tail;
2420            } else if (!QEMU_IS_ALIGNED(tail, bs->bl.request_alignment) &&
2421                       tail > bs->bl.request_alignment) {
2422                tail %= bs->bl.request_alignment;
2423                num -= tail;
2424            }
2425        }
2426        /* limit request size */
2427        if (num > max_pdiscard) {
2428            num = max_pdiscard;
2429        }
2430
2431        if (bs->drv->bdrv_co_pdiscard) {
2432            ret = bs->drv->bdrv_co_pdiscard(bs, offset, num);
2433        } else {
2434            BlockAIOCB *acb;
2435            CoroutineIOCompletion co = {
2436                .coroutine = qemu_coroutine_self(),
2437            };
2438
2439            acb = bs->drv->bdrv_aio_pdiscard(bs, offset, num,
2440                                             bdrv_co_io_em_complete, &co);
2441            if (acb == NULL) {
2442                ret = -EIO;
2443                goto out;
2444            } else {
2445                qemu_coroutine_yield();
2446                ret = co.ret;
2447            }
2448        }
2449        if (ret && ret != -ENOTSUP) {
2450            goto out;
2451        }
2452
2453        offset += num;
2454        bytes -= num;
2455    }
2456    ret = 0;
2457out:
2458    atomic_inc(&bs->write_gen);
2459    bdrv_set_dirty(bs, req.offset >> BDRV_SECTOR_BITS,
2460                   req.bytes >> BDRV_SECTOR_BITS);
2461    tracked_request_end(&req);
2462    bdrv_dec_in_flight(bs);
2463    return ret;
2464}
2465
2466int bdrv_pdiscard(BlockDriverState *bs, int64_t offset, int bytes)
2467{
2468    Coroutine *co;
2469    DiscardCo rwco = {
2470        .bs = bs,
2471        .offset = offset,
2472        .bytes = bytes,
2473        .ret = NOT_DONE,
2474    };
2475
2476    if (qemu_in_coroutine()) {
2477        /* Fast-path if already in coroutine context */
2478        bdrv_pdiscard_co_entry(&rwco);
2479    } else {
2480        co = qemu_coroutine_create(bdrv_pdiscard_co_entry, &rwco);
2481        bdrv_coroutine_enter(bs, co);
2482        BDRV_POLL_WHILE(bs, rwco.ret == NOT_DONE);
2483    }
2484
2485    return rwco.ret;
2486}
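
/*
 * Illustrative sketch (not part of the original file): discards are only a
 * hint, so a hypothetical caller may log a failure without propagating it.
 * The name example_try_discard is an assumption made for this example only.
 */
static void example_try_discard(BlockDriverState *bs, int64_t offset,
                                int bytes)
{
    int ret = bdrv_pdiscard(bs, offset, bytes);

    if (ret < 0) {
        /* The data is still readable; only the hint was lost */
        error_report("discard of %d bytes at %" PRId64 " failed: %d",
                     bytes, offset, ret);
    }
}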
2487
2488int bdrv_co_ioctl(BlockDriverState *bs, int req, void *buf)
2489{
2490    BlockDriver *drv = bs->drv;
2491    CoroutineIOCompletion co = {
2492        .coroutine = qemu_coroutine_self(),
2493    };
2494    BlockAIOCB *acb;
2495
2496    bdrv_inc_in_flight(bs);
2497    if (!drv || (!drv->bdrv_aio_ioctl && !drv->bdrv_co_ioctl)) {
2498        co.ret = -ENOTSUP;
2499        goto out;
2500    }
2501
2502    if (drv->bdrv_co_ioctl) {
2503        co.ret = drv->bdrv_co_ioctl(bs, req, buf);
2504    } else {
2505        acb = drv->bdrv_aio_ioctl(bs, req, buf, bdrv_co_io_em_complete, &co);
2506        if (!acb) {
2507            co.ret = -ENOTSUP;
2508            goto out;
2509        }
2510        qemu_coroutine_yield();
2511    }
2512out:
2513    bdrv_dec_in_flight(bs);
2514    return co.ret;
2515}
2516
2517void *qemu_blockalign(BlockDriverState *bs, size_t size)
2518{
2519    return qemu_memalign(bdrv_opt_mem_align(bs), size);
2520}
2521
2522void *qemu_blockalign0(BlockDriverState *bs, size_t size)
2523{
2524    return memset(qemu_blockalign(bs, size), 0, size);
2525}
2526
2527void *qemu_try_blockalign(BlockDriverState *bs, size_t size)
2528{
2529    size_t align = bdrv_opt_mem_align(bs);
2530
2531    /* Ensure that NULL is never returned on success */
2532    assert(align > 0);
2533    if (size == 0) {
2534        size = align;
2535    }
2536
2537    return qemu_try_memalign(align, size);
2538}
2539
2540void *qemu_try_blockalign0(BlockDriverState *bs, size_t size)
2541{
2542    void *mem = qemu_try_blockalign(bs, size);
2543
2544    if (mem) {
2545        memset(mem, 0, size);
2546    }
2547
2548    return mem;
2549}
2550
2551/*
2552 * Check if all memory in this vector satisfies the minimum memory alignment.
2553 */
2554bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov)
2555{
2556    int i;
2557    size_t alignment = bdrv_min_mem_align(bs);
2558
2559    for (i = 0; i < qiov->niov; i++) {
2560        if ((uintptr_t) qiov->iov[i].iov_base % alignment) {
2561            return false;
2562        }
2563        if (qiov->iov[i].iov_len % alignment) {
2564            return false;
2565        }
2566    }
2567
2568    return true;
2569}
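
/*
 * Illustrative sketch (not part of the original file): building a bounce
 * buffer that passes the check above.  qemu_blockalign() aligns the address;
 * the length must additionally be a multiple of the minimum alignment.  The
 * name example_bounce_qiov is an assumption made for this example only.
 */
static void *example_bounce_qiov(BlockDriverState *bs, size_t size,
                                 QEMUIOVector *qiov, struct iovec *iov)
{
    void *buf = qemu_blockalign(bs, size);

    *iov = (struct iovec) {
        .iov_base   = buf,
        .iov_len    = size,
    };
    qemu_iovec_init_external(qiov, iov, 1);

    /* The address from qemu_blockalign() is normally sufficiently aligned,
     * so this mostly guards against a misaligned 'size' */
    if (!bdrv_qiov_is_aligned(bs, qiov)) {
        qemu_vfree(buf);
        return NULL;
    }

    return buf;
}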
2570
2571void bdrv_add_before_write_notifier(BlockDriverState *bs,
2572                                    NotifierWithReturn *notifier)
2573{
2574    notifier_with_return_list_add(&bs->before_write_notifiers, notifier);
2575}
2576
2577void bdrv_io_plug(BlockDriverState *bs)
2578{
2579    BdrvChild *child;
2580
2581    QLIST_FOREACH(child, &bs->children, next) {
2582        bdrv_io_plug(child->bs);
2583    }
2584
2585    if (atomic_fetch_inc(&bs->io_plugged) == 0) {
2586        BlockDriver *drv = bs->drv;
2587        if (drv && drv->bdrv_io_plug) {
2588            drv->bdrv_io_plug(bs);
2589        }
2590    }
2591}
2592
2593void bdrv_io_unplug(BlockDriverState *bs)
2594{
2595    BdrvChild *child;
2596
2597    assert(bs->io_plugged);
2598    if (atomic_fetch_dec(&bs->io_plugged) == 1) {
2599        BlockDriver *drv = bs->drv;
2600        if (drv && drv->bdrv_io_unplug) {
2601            drv->bdrv_io_unplug(bs);
2602        }
2603    }
2604
2605    QLIST_FOREACH(child, &bs->children, next) {
2606        bdrv_io_unplug(child->bs);
2607    }
2608}
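
/*
 * Illustrative sketch (not part of the original file): the batching pattern
 * plug/unplug is meant for.  example_submit_one() stands in for whatever
 * asynchronous requests the caller issues and is an assumption made for this
 * example only.
 */
static void example_submit_batch(BlockDriverState *bs,
                                 void (*example_submit_one)(BlockDriverState *bs,
                                                            int index),
                                 int nr_reqs)
{
    int i;

    bdrv_io_plug(bs);
    for (i = 0; i < nr_reqs; i++) {
        /* Requests issued while plugged may be queued by the driver... */
        example_submit_one(bs, i);
    }
    /* ...and are submitted in one go when the final unplug arrives */
    bdrv_io_unplug(bs);
}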
2609