qemu/block/block-backend.c
   1/*
   2 * QEMU Block backends
   3 *
   4 * Copyright (C) 2014-2016 Red Hat, Inc.
   5 *
   6 * Authors:
   7 *  Markus Armbruster <armbru@redhat.com>,
   8 *
   9 * This work is licensed under the terms of the GNU LGPL, version 2.1
  10 * or later.  See the COPYING.LIB file in the top-level directory.
  11 */
  12
  13#include "qemu/osdep.h"
  14#include "sysemu/block-backend.h"
  15#include "block/block_int.h"
  16#include "block/blockjob.h"
  17#include "block/throttle-groups.h"
  18#include "hw/qdev-core.h"
  19#include "sysemu/blockdev.h"
  20#include "sysemu/runstate.h"
  21#include "sysemu/sysemu.h"
  22#include "sysemu/replay.h"
  23#include "qapi/error.h"
  24#include "qapi/qapi-events-block.h"
  25#include "qemu/id.h"
  26#include "qemu/main-loop.h"
  27#include "qemu/option.h"
  28#include "trace.h"
  29#include "migration/misc.h"
  30
  31/* Number of coroutines to reserve per attached device model */
  32#define COROUTINE_POOL_RESERVATION 64
  33
  34#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
  35
  36static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
  37
  38typedef struct BlockBackendAioNotifier {
  39    void (*attached_aio_context)(AioContext *new_context, void *opaque);
  40    void (*detach_aio_context)(void *opaque);
  41    void *opaque;
  42    QLIST_ENTRY(BlockBackendAioNotifier) list;
  43} BlockBackendAioNotifier;
  44
  45struct BlockBackend {
  46    char *name;
  47    int refcnt;
  48    BdrvChild *root;
  49    AioContext *ctx;
  50    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
  51    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
  52    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
  53    BlockBackendPublic public;
  54
  55    DeviceState *dev;           /* attached device model, if any */
  56    const BlockDevOps *dev_ops;
  57    void *dev_opaque;
  58
  59    /* the block size for which the guest device expects atomicity */
  60    int guest_block_size;
  61
  62    /* If the BDS tree is removed, some of its options are stored here (which
  63     * can be used to restore those options in the new BDS on insert) */
  64    BlockBackendRootState root_state;
  65
  66    bool enable_write_cache;
  67
  68    /* I/O stats (display with "info blockstats"). */
  69    BlockAcctStats stats;
  70
  71    BlockdevOnError on_read_error, on_write_error;
  72    bool iostatus_enabled;
  73    BlockDeviceIoStatus iostatus;
  74
  75    uint64_t perm;
  76    uint64_t shared_perm;
  77    bool disable_perm;
  78
  79    bool allow_aio_context_change;
  80    bool allow_write_beyond_eof;
  81
  82    NotifierList remove_bs_notifiers, insert_bs_notifiers;
  83    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
  84
  85    int quiesce_counter;
  86    CoQueue queued_requests;
  87    bool disable_request_queuing;
  88
  89    VMChangeStateEntry *vmsh;
  90    bool force_allow_inactivate;
  91
  92    /* Number of in-flight aio requests.  BlockDriverState also counts
  93     * in-flight requests but aio requests can exist even when blk->root is
  94     * NULL, so we cannot rely on its counter for that case.
  95     * Accessed with atomic ops.
  96     */
  97    unsigned int in_flight;
  98};
  99
 100typedef struct BlockBackendAIOCB {
 101    BlockAIOCB common;
 102    BlockBackend *blk;
 103    int ret;
 104} BlockBackendAIOCB;
 105
 106static const AIOCBInfo block_backend_aiocb_info = {
 107    .get_aio_context = blk_aiocb_get_aio_context,
 108    .aiocb_size = sizeof(BlockBackendAIOCB),
 109};
 110
 111static void drive_info_del(DriveInfo *dinfo);
 112static BlockBackend *bdrv_first_blk(BlockDriverState *bs);
 113
 114/* All BlockBackends */
 115static QTAILQ_HEAD(, BlockBackend) block_backends =
 116    QTAILQ_HEAD_INITIALIZER(block_backends);
 117
  118/* All BlockBackends referenced by the monitor and iterated through by
  119 * blk_next() */
 120static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
 121    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
 122
 123static void blk_root_inherit_options(BdrvChildRole role, bool parent_is_format,
 124                                     int *child_flags, QDict *child_options,
 125                                     int parent_flags, QDict *parent_options)
 126{
 127    /* We're not supposed to call this function for root nodes */
 128    abort();
 129}
 130static void blk_root_drained_begin(BdrvChild *child);
 131static bool blk_root_drained_poll(BdrvChild *child);
 132static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter);
 133
 134static void blk_root_change_media(BdrvChild *child, bool load);
 135static void blk_root_resize(BdrvChild *child);
 136
 137static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
 138                                     GSList **ignore, Error **errp);
 139static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
 140                                 GSList **ignore);
 141
 142static char *blk_root_get_parent_desc(BdrvChild *child)
 143{
 144    BlockBackend *blk = child->opaque;
 145    char *dev_id;
 146
 147    if (blk->name) {
 148        return g_strdup(blk->name);
 149    }
 150
 151    dev_id = blk_get_attached_dev_id(blk);
 152    if (*dev_id) {
 153        return dev_id;
 154    } else {
 155        /* TODO Callback into the BB owner for something more detailed */
 156        g_free(dev_id);
 157        return g_strdup("a block device");
 158    }
 159}
 160
 161static const char *blk_root_get_name(BdrvChild *child)
 162{
 163    return blk_name(child->opaque);
 164}
 165
 166static void blk_vm_state_changed(void *opaque, bool running, RunState state)
 167{
 168    Error *local_err = NULL;
 169    BlockBackend *blk = opaque;
 170
 171    if (state == RUN_STATE_INMIGRATE) {
 172        return;
 173    }
 174
 175    qemu_del_vm_change_state_handler(blk->vmsh);
 176    blk->vmsh = NULL;
 177    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
 178    if (local_err) {
 179        error_report_err(local_err);
 180    }
 181}
 182
 183/*
 184 * Notifies the user of the BlockBackend that migration has completed. qdev
 185 * devices can tighten their permissions in response (specifically revoke
 186 * shared write permissions that we needed for storage migration).
 187 *
  188 * If an error is returned, the VM must not be resumed.
 189 */
 190static void blk_root_activate(BdrvChild *child, Error **errp)
 191{
 192    BlockBackend *blk = child->opaque;
 193    Error *local_err = NULL;
 194
 195    if (!blk->disable_perm) {
 196        return;
 197    }
 198
 199    blk->disable_perm = false;
 200
 201    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
 202    if (local_err) {
 203        error_propagate(errp, local_err);
 204        blk->disable_perm = true;
 205        return;
 206    }
 207
 208    if (runstate_check(RUN_STATE_INMIGRATE)) {
 209        /* Activation can happen when migration process is still active, for
 210         * example when nbd_server_add is called during non-shared storage
 211         * migration. Defer the shared_perm update to migration completion. */
 212        if (!blk->vmsh) {
 213            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
 214                                                         blk);
 215        }
 216        return;
 217    }
 218
 219    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
 220    if (local_err) {
 221        error_propagate(errp, local_err);
 222        blk->disable_perm = true;
 223        return;
 224    }
 225}
 226
 227void blk_set_force_allow_inactivate(BlockBackend *blk)
 228{
 229    blk->force_allow_inactivate = true;
 230}
 231
 232static bool blk_can_inactivate(BlockBackend *blk)
 233{
 234    /* If it is a guest device, inactivate is ok. */
 235    if (blk->dev || blk_name(blk)[0]) {
 236        return true;
 237    }
 238
 239    /* Inactivating means no more writes to the image can be done,
 240     * even if those writes would be changes invisible to the
 241     * guest.  For block job BBs that satisfy this, we can just allow
 242     * it.  This is the case for mirror job source, which is required
 243     * by libvirt non-shared block migration. */
 244    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
 245        return true;
 246    }
 247
 248    return blk->force_allow_inactivate;
 249}
 250
 251static int blk_root_inactivate(BdrvChild *child)
 252{
 253    BlockBackend *blk = child->opaque;
 254
 255    if (blk->disable_perm) {
 256        return 0;
 257    }
 258
 259    if (!blk_can_inactivate(blk)) {
 260        return -EPERM;
 261    }
 262
 263    blk->disable_perm = true;
 264    if (blk->root) {
 265        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
 266    }
 267
 268    return 0;
 269}
 270
 271static void blk_root_attach(BdrvChild *child)
 272{
 273    BlockBackend *blk = child->opaque;
 274    BlockBackendAioNotifier *notifier;
 275
 276    trace_blk_root_attach(child, blk, child->bs);
 277
 278    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
 279        bdrv_add_aio_context_notifier(child->bs,
 280                notifier->attached_aio_context,
 281                notifier->detach_aio_context,
 282                notifier->opaque);
 283    }
 284}
 285
 286static void blk_root_detach(BdrvChild *child)
 287{
 288    BlockBackend *blk = child->opaque;
 289    BlockBackendAioNotifier *notifier;
 290
 291    trace_blk_root_detach(child, blk, child->bs);
 292
 293    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
 294        bdrv_remove_aio_context_notifier(child->bs,
 295                notifier->attached_aio_context,
 296                notifier->detach_aio_context,
 297                notifier->opaque);
 298    }
 299}
 300
 301static const BdrvChildClass child_root = {
 302    .inherit_options    = blk_root_inherit_options,
 303
 304    .change_media       = blk_root_change_media,
 305    .resize             = blk_root_resize,
 306    .get_name           = blk_root_get_name,
 307    .get_parent_desc    = blk_root_get_parent_desc,
 308
 309    .drained_begin      = blk_root_drained_begin,
 310    .drained_poll       = blk_root_drained_poll,
 311    .drained_end        = blk_root_drained_end,
 312
 313    .activate           = blk_root_activate,
 314    .inactivate         = blk_root_inactivate,
 315
 316    .attach             = blk_root_attach,
 317    .detach             = blk_root_detach,
 318
 319    .can_set_aio_ctx    = blk_root_can_set_aio_ctx,
 320    .set_aio_ctx        = blk_root_set_aio_ctx,
 321};
 322
 323/*
 324 * Create a new BlockBackend with a reference count of one.
 325 *
  326 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 327 * to request for a block driver node that is attached to this BlockBackend.
 328 * @shared_perm is a bitmask which describes which permissions may be granted
 329 * to other users of the attached node.
 330 * Both sets of permissions can be changed later using blk_set_perm().
 331 *
 332 * Return the new BlockBackend on success, null on failure.
 333 */
 334BlockBackend *blk_new(AioContext *ctx, uint64_t perm, uint64_t shared_perm)
 335{
 336    BlockBackend *blk;
 337
 338    blk = g_new0(BlockBackend, 1);
 339    blk->refcnt = 1;
 340    blk->ctx = ctx;
 341    blk->perm = perm;
 342    blk->shared_perm = shared_perm;
 343    blk_set_enable_write_cache(blk, true);
 344
 345    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
 346    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
 347
 348    block_acct_init(&blk->stats);
 349
 350    qemu_co_queue_init(&blk->queued_requests);
 351    notifier_list_init(&blk->remove_bs_notifiers);
 352    notifier_list_init(&blk->insert_bs_notifiers);
 353    QLIST_INIT(&blk->aio_notifiers);
 354
 355    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
 356    return blk;
 357}
 358
 359/*
 360 * Create a new BlockBackend connected to an existing BlockDriverState.
 361 *
  362 * @perm is a bitmask of BLK_PERM_* constants which describes the
 363 * permissions to request for @bs that is attached to this
 364 * BlockBackend.  @shared_perm is a bitmask which describes which
 365 * permissions may be granted to other users of the attached node.
 366 * Both sets of permissions can be changed later using blk_set_perm().
 367 *
 368 * Return the new BlockBackend on success, null on failure.
 369 */
 370BlockBackend *blk_new_with_bs(BlockDriverState *bs, uint64_t perm,
 371                              uint64_t shared_perm, Error **errp)
 372{
 373    BlockBackend *blk = blk_new(bdrv_get_aio_context(bs), perm, shared_perm);
 374
 375    if (blk_insert_bs(blk, bs, errp) < 0) {
 376        blk_unref(blk);
 377        return NULL;
 378    }
 379    return blk;
 380}
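
/*
 * Illustrative sketch of how a caller might use this, assuming a
 * BlockDriverState *bs obtained elsewhere (hypothetical; not a specific
 * caller in this file):
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     blk = blk_new_with_bs(bs, BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE,
 *                           BLK_PERM_ALL, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */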
 381
 382/*
 383 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 384 * The new BlockBackend is in the main AioContext.
 385 *
 386 * Just as with bdrv_open(), after having called this function the reference to
 387 * @options belongs to the block layer (even on failure).
 388 *
 389 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 390 * BDS tree just by specifying the @options QDict (or @reference,
 391 * alternatively). At the time of adding this function, this is not possible,
 392 * though, so callers of this function have to be able to specify @filename and
 393 * @flags.
 394 */
 395BlockBackend *blk_new_open(const char *filename, const char *reference,
 396                           QDict *options, int flags, Error **errp)
 397{
 398    BlockBackend *blk;
 399    BlockDriverState *bs;
 400    uint64_t perm = 0;
 401
 402    /* blk_new_open() is mainly used in .bdrv_create implementations and the
 403     * tools where sharing isn't a concern because the BDS stays private, so we
 404     * just request permission according to the flags.
 405     *
 406     * The exceptions are xen_disk and blockdev_init(); in these cases, the
 407     * caller of blk_new_open() doesn't make use of the permissions, but they
 408     * shouldn't hurt either. We can still share everything here because the
 409     * guest devices will add their own blockers if they can't share. */
 410    if ((flags & BDRV_O_NO_IO) == 0) {
 411        perm |= BLK_PERM_CONSISTENT_READ;
 412        if (flags & BDRV_O_RDWR) {
 413            perm |= BLK_PERM_WRITE;
 414        }
 415    }
 416    if (flags & BDRV_O_RESIZE) {
 417        perm |= BLK_PERM_RESIZE;
 418    }
 419
 420    blk = blk_new(qemu_get_aio_context(), perm, BLK_PERM_ALL);
 421    bs = bdrv_open(filename, reference, options, flags, errp);
 422    if (!bs) {
 423        blk_unref(blk);
 424        return NULL;
 425    }
 426
 427    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
 428                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
 429                                       blk->ctx, perm, BLK_PERM_ALL, blk, errp);
 430    if (!blk->root) {
 431        blk_unref(blk);
 432        return NULL;
 433    }
 434
 435    return blk;
 436}
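
/*
 * Illustrative sketch of a tool-style caller, assuming "test.qcow2" is a
 * hypothetical image and that a NULL @options QDict is acceptable here, as
 * with bdrv_open():
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     blk = blk_new_open("test.qcow2", NULL, NULL, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *     }
 */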
 437
 438static void blk_delete(BlockBackend *blk)
 439{
 440    assert(!blk->refcnt);
 441    assert(!blk->name);
 442    assert(!blk->dev);
 443    if (blk->public.throttle_group_member.throttle_state) {
 444        blk_io_limits_disable(blk);
 445    }
 446    if (blk->root) {
 447        blk_remove_bs(blk);
 448    }
 449    if (blk->vmsh) {
 450        qemu_del_vm_change_state_handler(blk->vmsh);
 451        blk->vmsh = NULL;
 452    }
 453    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
 454    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
 455    assert(QLIST_EMPTY(&blk->aio_notifiers));
 456    QTAILQ_REMOVE(&block_backends, blk, link);
 457    drive_info_del(blk->legacy_dinfo);
 458    block_acct_cleanup(&blk->stats);
 459    g_free(blk);
 460}
 461
 462static void drive_info_del(DriveInfo *dinfo)
 463{
 464    if (!dinfo) {
 465        return;
 466    }
 467    qemu_opts_del(dinfo->opts);
 468    g_free(dinfo);
 469}
 470
 471int blk_get_refcnt(BlockBackend *blk)
 472{
 473    return blk ? blk->refcnt : 0;
 474}
 475
 476/*
 477 * Increment @blk's reference count.
 478 * @blk must not be null.
 479 */
 480void blk_ref(BlockBackend *blk)
 481{
 482    assert(blk->refcnt > 0);
 483    blk->refcnt++;
 484}
 485
 486/*
 487 * Decrement @blk's reference count.
 488 * If this drops it to zero, destroy @blk.
 489 * For convenience, do nothing if @blk is null.
 490 */
 491void blk_unref(BlockBackend *blk)
 492{
 493    if (blk) {
 494        assert(blk->refcnt > 0);
 495        if (blk->refcnt > 1) {
 496            blk->refcnt--;
 497        } else {
 498            blk_drain(blk);
 499            /* blk_drain() cannot resurrect blk, nobody held a reference */
 500            assert(blk->refcnt == 1);
 501            blk->refcnt = 0;
 502            blk_delete(blk);
 503        }
 504    }
 505}
 506
 507/*
 508 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 509 * ones which are hidden (i.e. are not referenced by the monitor).
 510 */
 511BlockBackend *blk_all_next(BlockBackend *blk)
 512{
 513    return blk ? QTAILQ_NEXT(blk, link)
 514               : QTAILQ_FIRST(&block_backends);
 515}
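
/*
 * Usage mirrors blk_next(); for example, to visit every BlockBackend,
 * monitor-owned or not:
 *
 *     BlockBackend *blk = NULL;
 *
 *     while ((blk = blk_all_next(blk)) != NULL) {
 *         ...
 *     }
 */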
 516
 517void blk_remove_all_bs(void)
 518{
 519    BlockBackend *blk = NULL;
 520
 521    while ((blk = blk_all_next(blk)) != NULL) {
 522        AioContext *ctx = blk_get_aio_context(blk);
 523
 524        aio_context_acquire(ctx);
 525        if (blk->root) {
 526            blk_remove_bs(blk);
 527        }
 528        aio_context_release(ctx);
 529    }
 530}
 531
 532/*
 533 * Return the monitor-owned BlockBackend after @blk.
 534 * If @blk is null, return the first one.
 535 * Else, return @blk's next sibling, which may be null.
 536 *
 537 * To iterate over all BlockBackends, do
 538 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 539 *     ...
 540 * }
 541 */
 542BlockBackend *blk_next(BlockBackend *blk)
 543{
 544    return blk ? QTAILQ_NEXT(blk, monitor_link)
 545               : QTAILQ_FIRST(&monitor_block_backends);
 546}
 547
 548/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 549 * the monitor or attached to a BlockBackend */
 550BlockDriverState *bdrv_next(BdrvNextIterator *it)
 551{
 552    BlockDriverState *bs, *old_bs;
 553
 554    /* Must be called from the main loop */
 555    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
 556
 557    /* First, return all root nodes of BlockBackends. In order to avoid
 558     * returning a BDS twice when multiple BBs refer to it, we only return it
 559     * if the BB is the first one in the parent list of the BDS. */
 560    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
 561        BlockBackend *old_blk = it->blk;
 562
 563        old_bs = old_blk ? blk_bs(old_blk) : NULL;
 564
 565        do {
 566            it->blk = blk_all_next(it->blk);
 567            bs = it->blk ? blk_bs(it->blk) : NULL;
 568        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));
 569
 570        if (it->blk) {
 571            blk_ref(it->blk);
 572        }
 573        blk_unref(old_blk);
 574
 575        if (bs) {
 576            bdrv_ref(bs);
 577            bdrv_unref(old_bs);
 578            return bs;
 579        }
 580        it->phase = BDRV_NEXT_MONITOR_OWNED;
 581    } else {
 582        old_bs = it->bs;
 583    }
 584
 585    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
 586     * BDSes that are attached to a BlockBackend here; they have been handled
 587     * by the above block already */
 588    do {
 589        it->bs = bdrv_next_monitor_owned(it->bs);
 590        bs = it->bs;
 591    } while (bs && bdrv_has_blk(bs));
 592
 593    if (bs) {
 594        bdrv_ref(bs);
 595    }
 596    bdrv_unref(old_bs);
 597
 598    return bs;
 599}
 600
 601static void bdrv_next_reset(BdrvNextIterator *it)
 602{
 603    *it = (BdrvNextIterator) {
 604        .phase = BDRV_NEXT_BACKEND_ROOTS,
 605    };
 606}
 607
 608BlockDriverState *bdrv_first(BdrvNextIterator *it)
 609{
 610    bdrv_next_reset(it);
 611    return bdrv_next(it);
 612}
 613
 614/* Must be called when aborting a bdrv_next() iteration before
 615 * bdrv_next() returns NULL */
 616void bdrv_next_cleanup(BdrvNextIterator *it)
 617{
 618    /* Must be called from the main loop */
 619    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
 620
 621    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
 622        if (it->blk) {
 623            bdrv_unref(blk_bs(it->blk));
 624            blk_unref(it->blk);
 625        }
 626    } else {
 627        bdrv_unref(it->bs);
 628    }
 629
 630    bdrv_next_reset(it);
 631}
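
/*
 * Sketch of the intended iteration pattern (assumed from the functions
 * above); should_stop() is a hypothetical predicate:
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (should_stop(bs)) {
 *             bdrv_next_cleanup(&it);
 *             break;
 *         }
 *     }
 */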
 632
 633/*
 634 * Add a BlockBackend into the list of backends referenced by the monitor, with
 635 * the given @name acting as the handle for the monitor.
 636 * Strictly for use by blockdev.c.
 637 *
 638 * @name must not be null or empty.
 639 *
 640 * Returns true on success and false on failure. In the latter case, an Error
 641 * object is returned through @errp.
 642 */
 643bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
 644{
 645    assert(!blk->name);
 646    assert(name && name[0]);
 647
 648    if (!id_wellformed(name)) {
 649        error_setg(errp, "Invalid device name");
 650        return false;
 651    }
 652    if (blk_by_name(name)) {
 653        error_setg(errp, "Device with id '%s' already exists", name);
 654        return false;
 655    }
 656    if (bdrv_find_node(name)) {
 657        error_setg(errp,
 658                   "Device name '%s' conflicts with an existing node name",
 659                   name);
 660        return false;
 661    }
 662
 663    blk->name = g_strdup(name);
 664    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
 665    return true;
 666}
 667
 668/*
 669 * Remove a BlockBackend from the list of backends referenced by the monitor.
 670 * Strictly for use by blockdev.c.
 671 */
 672void monitor_remove_blk(BlockBackend *blk)
 673{
 674    if (!blk->name) {
 675        return;
 676    }
 677
 678    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
 679    g_free(blk->name);
 680    blk->name = NULL;
 681}
 682
 683/*
 684 * Return @blk's name, a non-null string.
 685 * Returns an empty string iff @blk is not referenced by the monitor.
 686 */
 687const char *blk_name(const BlockBackend *blk)
 688{
 689    return blk->name ?: "";
 690}
 691
 692/*
 693 * Return the BlockBackend with name @name if it exists, else null.
 694 * @name must not be null.
 695 */
 696BlockBackend *blk_by_name(const char *name)
 697{
 698    BlockBackend *blk = NULL;
 699
 700    assert(name);
 701    while ((blk = blk_next(blk)) != NULL) {
 702        if (!strcmp(name, blk->name)) {
 703            return blk;
 704        }
 705    }
 706    return NULL;
 707}
 708
 709/*
 710 * Return the BlockDriverState attached to @blk if any, else null.
 711 */
 712BlockDriverState *blk_bs(BlockBackend *blk)
 713{
 714    return blk->root ? blk->root->bs : NULL;
 715}
 716
 717static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
 718{
 719    BdrvChild *child;
 720    QLIST_FOREACH(child, &bs->parents, next_parent) {
 721        if (child->klass == &child_root) {
 722            return child->opaque;
 723        }
 724    }
 725
 726    return NULL;
 727}
 728
 729/*
 730 * Returns true if @bs has an associated BlockBackend.
 731 */
 732bool bdrv_has_blk(BlockDriverState *bs)
 733{
 734    return bdrv_first_blk(bs) != NULL;
 735}
 736
 737/*
 738 * Returns true if @bs has only BlockBackends as parents.
 739 */
 740bool bdrv_is_root_node(BlockDriverState *bs)
 741{
 742    BdrvChild *c;
 743
 744    QLIST_FOREACH(c, &bs->parents, next_parent) {
 745        if (c->klass != &child_root) {
 746            return false;
 747        }
 748    }
 749
 750    return true;
 751}
 752
 753/*
 754 * Return @blk's DriveInfo if any, else null.
 755 */
 756DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
 757{
 758    return blk->legacy_dinfo;
 759}
 760
 761/*
 762 * Set @blk's DriveInfo to @dinfo, and return it.
 763 * @blk must not have a DriveInfo set already.
 764 * No other BlockBackend may have the same DriveInfo set.
 765 */
 766DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
 767{
 768    assert(!blk->legacy_dinfo);
 769    return blk->legacy_dinfo = dinfo;
 770}
 771
 772/*
 773 * Return the BlockBackend with DriveInfo @dinfo.
 774 * It must exist.
 775 */
 776BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
 777{
 778    BlockBackend *blk = NULL;
 779
 780    while ((blk = blk_next(blk)) != NULL) {
 781        if (blk->legacy_dinfo == dinfo) {
 782            return blk;
 783        }
 784    }
 785    abort();
 786}
 787
 788/*
 789 * Returns a pointer to the publicly accessible fields of @blk.
 790 */
 791BlockBackendPublic *blk_get_public(BlockBackend *blk)
 792{
 793    return &blk->public;
 794}
 795
 796/*
 797 * Returns a BlockBackend given the associated @public fields.
 798 */
 799BlockBackend *blk_by_public(BlockBackendPublic *public)
 800{
 801    return container_of(public, BlockBackend, public);
 802}
 803
 804/*
 805 * Disassociates the currently associated BlockDriverState from @blk.
 806 */
 807void blk_remove_bs(BlockBackend *blk)
 808{
 809    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
 810    BlockDriverState *bs;
 811    BdrvChild *root;
 812
 813    notifier_list_notify(&blk->remove_bs_notifiers, blk);
 814    if (tgm->throttle_state) {
 815        bs = blk_bs(blk);
 816        bdrv_drained_begin(bs);
 817        throttle_group_detach_aio_context(tgm);
 818        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
 819        bdrv_drained_end(bs);
 820    }
 821
 822    blk_update_root_state(blk);
 823
 824    /* bdrv_root_unref_child() will cause blk->root to become stale and may
 825     * switch to a completion coroutine later on. Let's drain all I/O here
 826     * to avoid that and a potential QEMU crash.
 827     */
 828    blk_drain(blk);
 829    root = blk->root;
 830    blk->root = NULL;
 831    bdrv_root_unref_child(root);
 832}
 833
 834/*
 835 * Associates a new BlockDriverState with @blk.
 836 */
 837int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 838{
 839    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
 840    bdrv_ref(bs);
 841    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
 842                                       BDRV_CHILD_FILTERED | BDRV_CHILD_PRIMARY,
 843                                       blk->ctx, blk->perm, blk->shared_perm,
 844                                       blk, errp);
 845    if (blk->root == NULL) {
 846        return -EPERM;
 847    }
 848
 849    notifier_list_notify(&blk->insert_bs_notifiers, blk);
 850    if (tgm->throttle_state) {
 851        throttle_group_detach_aio_context(tgm);
 852        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
 853    }
 854
 855    return 0;
 856}
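
/*
 * Sketch of a medium swap using blk_remove_bs() and blk_insert_bs(),
 * assuming a hypothetical replacement node new_bs:
 *
 *     Error *local_err = NULL;
 *
 *     blk_remove_bs(blk);
 *     if (blk_insert_bs(blk, new_bs, &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */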
 857
 858/*
 859 * Sets the permission bitmasks that the user of the BlockBackend needs.
 860 */
 861int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
 862                 Error **errp)
 863{
 864    int ret;
 865
 866    if (blk->root && !blk->disable_perm) {
 867        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
 868        if (ret < 0) {
 869            return ret;
 870        }
 871    }
 872
 873    blk->perm = perm;
 874    blk->shared_perm = shared_perm;
 875
 876    return 0;
 877}
 878
 879void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
 880{
 881    *perm = blk->perm;
 882    *shared_perm = blk->shared_perm;
 883}
 884
 885/*
 886 * Attach device model @dev to @blk.
 887 * Return 0 on success, -EBUSY when a device model is attached already.
 888 */
 889int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
 890{
 891    if (blk->dev) {
 892        return -EBUSY;
 893    }
 894
 895    /* While migration is still incoming, we don't need to apply the
 896     * permissions of guest device BlockBackends. We might still have a block
 897     * job or NBD server writing to the image for storage migration. */
 898    if (runstate_check(RUN_STATE_INMIGRATE)) {
 899        blk->disable_perm = true;
 900    }
 901
 902    blk_ref(blk);
 903    blk->dev = dev;
 904    blk_iostatus_reset(blk);
 905
 906    return 0;
 907}
 908
 909/*
 910 * Detach device model @dev from @blk.
 911 * @dev must be currently attached to @blk.
 912 */
 913void blk_detach_dev(BlockBackend *blk, DeviceState *dev)
 914{
 915    assert(blk->dev == dev);
 916    blk->dev = NULL;
 917    blk->dev_ops = NULL;
 918    blk->dev_opaque = NULL;
 919    blk->guest_block_size = 512;
 920    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
 921    blk_unref(blk);
 922}
 923
 924/*
 925 * Return the device model attached to @blk if any, else null.
 926 */
 927DeviceState *blk_get_attached_dev(BlockBackend *blk)
 928{
 929    return blk->dev;
 930}
 931
  932/* Return the qdev ID or, if no ID is assigned, the QOM path of the block
  933 * device attached to the BlockBackend. */
 934char *blk_get_attached_dev_id(BlockBackend *blk)
 935{
 936    DeviceState *dev = blk->dev;
 937
 938    if (!dev) {
 939        return g_strdup("");
 940    } else if (dev->id) {
 941        return g_strdup(dev->id);
 942    }
 943
 944    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
 945}
 946
 947/*
 948 * Return the BlockBackend which has the device model @dev attached if it
 949 * exists, else null.
 950 *
 951 * @dev must not be null.
 952 */
 953BlockBackend *blk_by_dev(void *dev)
 954{
 955    BlockBackend *blk = NULL;
 956
 957    assert(dev != NULL);
 958    while ((blk = blk_all_next(blk)) != NULL) {
 959        if (blk->dev == dev) {
 960            return blk;
 961        }
 962    }
 963    return NULL;
 964}
 965
 966/*
 967 * Set @blk's device model callbacks to @ops.
 968 * @opaque is the opaque argument to pass to the callbacks.
 969 * This is for use by device models.
 970 */
 971void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
 972                     void *opaque)
 973{
 974    blk->dev_ops = ops;
 975    blk->dev_opaque = opaque;
 976
 977    /* Are we currently quiesced? Should we enforce this right now? */
 978    if (blk->quiesce_counter && ops->drained_begin) {
 979        ops->drained_begin(opaque);
 980    }
 981}
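
/*
 * Sketch of assumed usage: a device model typically registers a static ops
 * table at realize time; my_resize_cb, my_drained_begin and my_device_state
 * are hypothetical:
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .resize_cb     = my_resize_cb,
 *         .drained_begin = my_drained_begin,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_dev_ops, my_device_state);
 */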
 982
 983/*
 984 * Notify @blk's attached device model of media change.
 985 *
 986 * If @load is true, notify of media load. This action can fail, meaning that
  987 * the medium cannot be loaded; @errp is set in that case.
 988 *
 989 * If @load is false, notify of media eject. This can never fail.
 990 *
 991 * Also send DEVICE_TRAY_MOVED events as appropriate.
 992 */
 993void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
 994{
 995    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
 996        bool tray_was_open, tray_is_open;
 997        Error *local_err = NULL;
 998
 999        tray_was_open = blk_dev_is_tray_open(blk);
1000        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
1001        if (local_err) {
1002            assert(load == true);
1003            error_propagate(errp, local_err);
1004            return;
1005        }
1006        tray_is_open = blk_dev_is_tray_open(blk);
1007
1008        if (tray_was_open != tray_is_open) {
1009            char *id = blk_get_attached_dev_id(blk);
1010            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
1011            g_free(id);
1012        }
1013    }
1014}
1015
1016static void blk_root_change_media(BdrvChild *child, bool load)
1017{
1018    blk_dev_change_media_cb(child->opaque, load, NULL);
1019}
1020
1021/*
1022 * Does @blk's attached device model have removable media?
1023 * %true if no device model is attached.
1024 */
1025bool blk_dev_has_removable_media(BlockBackend *blk)
1026{
1027    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
1028}
1029
1030/*
1031 * Does @blk's attached device model have a tray?
1032 */
1033bool blk_dev_has_tray(BlockBackend *blk)
1034{
1035    return blk->dev_ops && blk->dev_ops->is_tray_open;
1036}
1037
1038/*
1039 * Notify @blk's attached device model of a media eject request.
1040 * If @force is true, the medium is about to be yanked out forcefully.
1041 */
1042void blk_dev_eject_request(BlockBackend *blk, bool force)
1043{
1044    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
1045        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
1046    }
1047}
1048
1049/*
1050 * Does @blk's attached device model have a tray, and is it open?
1051 */
1052bool blk_dev_is_tray_open(BlockBackend *blk)
1053{
1054    if (blk_dev_has_tray(blk)) {
1055        return blk->dev_ops->is_tray_open(blk->dev_opaque);
1056    }
1057    return false;
1058}
1059
1060/*
1061 * Does @blk's attached device model have the medium locked?
1062 * %false if the device model has no such lock.
1063 */
1064bool blk_dev_is_medium_locked(BlockBackend *blk)
1065{
1066    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
1067        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
1068    }
1069    return false;
1070}
1071
1072/*
1073 * Notify @blk's attached device model of a backend size change.
1074 */
1075static void blk_root_resize(BdrvChild *child)
1076{
1077    BlockBackend *blk = child->opaque;
1078
1079    if (blk->dev_ops && blk->dev_ops->resize_cb) {
1080        blk->dev_ops->resize_cb(blk->dev_opaque);
1081    }
1082}
1083
1084void blk_iostatus_enable(BlockBackend *blk)
1085{
1086    blk->iostatus_enabled = true;
1087    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
1088}
1089
1090/* The I/O status is only enabled if the drive explicitly
1091 * enables it _and_ the VM is configured to stop on errors */
1092bool blk_iostatus_is_enabled(const BlockBackend *blk)
1093{
1094    return (blk->iostatus_enabled &&
1095           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
1096            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
1097            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
1098}
1099
1100BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
1101{
1102    return blk->iostatus;
1103}
1104
1105void blk_iostatus_disable(BlockBackend *blk)
1106{
1107    blk->iostatus_enabled = false;
1108}
1109
1110void blk_iostatus_reset(BlockBackend *blk)
1111{
1112    if (blk_iostatus_is_enabled(blk)) {
1113        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
1114    }
1115}
1116
1117void blk_iostatus_set_err(BlockBackend *blk, int error)
1118{
1119    assert(blk_iostatus_is_enabled(blk));
1120    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
1121        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
1122                                          BLOCK_DEVICE_IO_STATUS_FAILED;
1123    }
1124}
1125
1126void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
1127{
1128    blk->allow_write_beyond_eof = allow;
1129}
1130
1131void blk_set_allow_aio_context_change(BlockBackend *blk, bool allow)
1132{
1133    blk->allow_aio_context_change = allow;
1134}
1135
1136void blk_set_disable_request_queuing(BlockBackend *blk, bool disable)
1137{
1138    blk->disable_request_queuing = disable;
1139}
1140
1141static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
1142                                  size_t size)
1143{
1144    int64_t len;
1145
1146    if (size > INT_MAX) {
1147        return -EIO;
1148    }
1149
1150    if (!blk_is_available(blk)) {
1151        return -ENOMEDIUM;
1152    }
1153
1154    if (offset < 0) {
1155        return -EIO;
1156    }
1157
1158    if (!blk->allow_write_beyond_eof) {
1159        len = blk_getlength(blk);
1160        if (len < 0) {
1161            return len;
1162        }
1163
1164        if (offset > len || len - offset < size) {
1165            return -EIO;
1166        }
1167    }
1168
1169    return 0;
1170}
1171
1172/* To be called between exactly one pair of blk_inc/dec_in_flight() */
1173static void coroutine_fn blk_wait_while_drained(BlockBackend *blk)
1174{
1175    assert(blk->in_flight > 0);
1176
1177    if (blk->quiesce_counter && !blk->disable_request_queuing) {
1178        blk_dec_in_flight(blk);
1179        qemu_co_queue_wait(&blk->queued_requests, NULL);
1180        blk_inc_in_flight(blk);
1181    }
1182}
1183
1184/* To be called between exactly one pair of blk_inc/dec_in_flight() */
1185static int coroutine_fn
1186blk_do_preadv(BlockBackend *blk, int64_t offset, unsigned int bytes,
1187              QEMUIOVector *qiov, BdrvRequestFlags flags)
1188{
1189    int ret;
1190    BlockDriverState *bs;
1191
1192    blk_wait_while_drained(blk);
1193
1194    /* Call blk_bs() only after waiting, the graph may have changed */
1195    bs = blk_bs(blk);
1196    trace_blk_co_preadv(blk, bs, offset, bytes, flags);
1197
1198    ret = blk_check_byte_request(blk, offset, bytes);
1199    if (ret < 0) {
1200        return ret;
1201    }
1202
1203    bdrv_inc_in_flight(bs);
1204
1205    /* throttling disk I/O */
1206    if (blk->public.throttle_group_member.throttle_state) {
1207        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
1208                bytes, false);
1209    }
1210
1211    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
1212    bdrv_dec_in_flight(bs);
1213    return ret;
1214}
1215
1216int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
1217                               unsigned int bytes, QEMUIOVector *qiov,
1218                               BdrvRequestFlags flags)
1219{
1220    int ret;
1221
1222    blk_inc_in_flight(blk);
1223    ret = blk_do_preadv(blk, offset, bytes, qiov, flags);
1224    blk_dec_in_flight(blk);
1225
1226    return ret;
1227}
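
/*
 * Sketch of assumed usage from coroutine context, reading 512 bytes from
 * offset 0 into a local buffer:
 *
 *     uint8_t buf[512];
 *     QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, sizeof(buf));
 *     int ret = blk_co_preadv(blk, 0, sizeof(buf), &qiov, 0);
 */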
1228
1229/* To be called between exactly one pair of blk_inc/dec_in_flight() */
1230static int coroutine_fn
1231blk_do_pwritev_part(BlockBackend *blk, int64_t offset, unsigned int bytes,
1232                    QEMUIOVector *qiov, size_t qiov_offset,
1233                    BdrvRequestFlags flags)
1234{
1235    int ret;
1236    BlockDriverState *bs;
1237
1238    blk_wait_while_drained(blk);
1239
1240    /* Call blk_bs() only after waiting, the graph may have changed */
1241    bs = blk_bs(blk);
1242    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
1243
1244    ret = blk_check_byte_request(blk, offset, bytes);
1245    if (ret < 0) {
1246        return ret;
1247    }
1248
1249    bdrv_inc_in_flight(bs);
1250    /* throttling disk I/O */
1251    if (blk->public.throttle_group_member.throttle_state) {
1252        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
1253                bytes, true);
1254    }
1255
1256    if (!blk->enable_write_cache) {
1257        flags |= BDRV_REQ_FUA;
1258    }
1259
1260    ret = bdrv_co_pwritev_part(blk->root, offset, bytes, qiov, qiov_offset,
1261                               flags);
1262    bdrv_dec_in_flight(bs);
1263    return ret;
1264}
1265
1266int coroutine_fn blk_co_pwritev_part(BlockBackend *blk, int64_t offset,
1267                                     unsigned int bytes,
1268                                     QEMUIOVector *qiov, size_t qiov_offset,
1269                                     BdrvRequestFlags flags)
1270{
1271    int ret;
1272
1273    blk_inc_in_flight(blk);
1274    ret = blk_do_pwritev_part(blk, offset, bytes, qiov, qiov_offset, flags);
1275    blk_dec_in_flight(blk);
1276
1277    return ret;
1278}
1279
1280int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
1281                                unsigned int bytes, QEMUIOVector *qiov,
1282                                BdrvRequestFlags flags)
1283{
1284    return blk_co_pwritev_part(blk, offset, bytes, qiov, 0, flags);
1285}
1286
1287typedef struct BlkRwCo {
1288    BlockBackend *blk;
1289    int64_t offset;
1290    void *iobuf;
1291    int ret;
1292    BdrvRequestFlags flags;
1293} BlkRwCo;
1294
1295static void blk_read_entry(void *opaque)
1296{
1297    BlkRwCo *rwco = opaque;
1298    QEMUIOVector *qiov = rwco->iobuf;
1299
1300    rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, qiov->size,
1301                              qiov, rwco->flags);
1302    aio_wait_kick();
1303}
1304
1305static void blk_write_entry(void *opaque)
1306{
1307    BlkRwCo *rwco = opaque;
1308    QEMUIOVector *qiov = rwco->iobuf;
1309
1310    rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, qiov->size,
1311                                    qiov, 0, rwco->flags);
1312    aio_wait_kick();
1313}
1314
1315static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
1316                   int64_t bytes, CoroutineEntry co_entry,
1317                   BdrvRequestFlags flags)
1318{
1319    QEMUIOVector qiov = QEMU_IOVEC_INIT_BUF(qiov, buf, bytes);
1320    BlkRwCo rwco = {
1321        .blk    = blk,
1322        .offset = offset,
1323        .iobuf  = &qiov,
1324        .flags  = flags,
1325        .ret    = NOT_DONE,
1326    };
1327
1328    blk_inc_in_flight(blk);
1329    if (qemu_in_coroutine()) {
1330        /* Fast-path if already in coroutine context */
1331        co_entry(&rwco);
1332    } else {
1333        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
1334        bdrv_coroutine_enter(blk_bs(blk), co);
1335        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
1336    }
1337    blk_dec_in_flight(blk);
1338
1339    return rwco.ret;
1340}
1341
1342int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
1343                      int bytes, BdrvRequestFlags flags)
1344{
1345    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
1346                   flags | BDRV_REQ_ZERO_WRITE);
1347}
1348
1349int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
1350{
1351    return bdrv_make_zero(blk->root, flags);
1352}
1353
1354void blk_inc_in_flight(BlockBackend *blk)
1355{
1356    qatomic_inc(&blk->in_flight);
1357}
1358
1359void blk_dec_in_flight(BlockBackend *blk)
1360{
1361    qatomic_dec(&blk->in_flight);
1362    aio_wait_kick();
1363}
1364
1365static void error_callback_bh(void *opaque)
1366{
1367    struct BlockBackendAIOCB *acb = opaque;
1368
1369    blk_dec_in_flight(acb->blk);
1370    acb->common.cb(acb->common.opaque, acb->ret);
1371    qemu_aio_unref(acb);
1372}
1373
1374BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
1375                                  BlockCompletionFunc *cb,
1376                                  void *opaque, int ret)
1377{
1378    struct BlockBackendAIOCB *acb;
1379
1380    blk_inc_in_flight(blk);
1381    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
1382    acb->blk = blk;
1383    acb->ret = ret;
1384
1385    replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
1386                                     error_callback_bh, acb);
1387    return &acb->common;
1388}
1389
1390typedef struct BlkAioEmAIOCB {
1391    BlockAIOCB common;
1392    BlkRwCo rwco;
1393    int bytes;
1394    bool has_returned;
1395} BlkAioEmAIOCB;
1396
1397static AioContext *blk_aio_em_aiocb_get_aio_context(BlockAIOCB *acb_)
1398{
1399    BlkAioEmAIOCB *acb = container_of(acb_, BlkAioEmAIOCB, common);
1400
1401    return blk_get_aio_context(acb->rwco.blk);
1402}
1403
1404static const AIOCBInfo blk_aio_em_aiocb_info = {
1405    .aiocb_size         = sizeof(BlkAioEmAIOCB),
1406    .get_aio_context    = blk_aio_em_aiocb_get_aio_context,
1407};
1408
1409static void blk_aio_complete(BlkAioEmAIOCB *acb)
1410{
1411    if (acb->has_returned) {
1412        acb->common.cb(acb->common.opaque, acb->rwco.ret);
1413        blk_dec_in_flight(acb->rwco.blk);
1414        qemu_aio_unref(acb);
1415    }
1416}
1417
1418static void blk_aio_complete_bh(void *opaque)
1419{
1420    BlkAioEmAIOCB *acb = opaque;
1421    assert(acb->has_returned);
1422    blk_aio_complete(acb);
1423}
1424
1425static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
1426                                void *iobuf, CoroutineEntry co_entry,
1427                                BdrvRequestFlags flags,
1428                                BlockCompletionFunc *cb, void *opaque)
1429{
1430    BlkAioEmAIOCB *acb;
1431    Coroutine *co;
1432
1433    blk_inc_in_flight(blk);
1434    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
1435    acb->rwco = (BlkRwCo) {
1436        .blk    = blk,
1437        .offset = offset,
1438        .iobuf  = iobuf,
1439        .flags  = flags,
1440        .ret    = NOT_DONE,
1441    };
1442    acb->bytes = bytes;
1443    acb->has_returned = false;
1444
1445    co = qemu_coroutine_create(co_entry, acb);
1446    bdrv_coroutine_enter(blk_bs(blk), co);
1447
1448    acb->has_returned = true;
1449    if (acb->rwco.ret != NOT_DONE) {
1450        replay_bh_schedule_oneshot_event(blk_get_aio_context(blk),
1451                                         blk_aio_complete_bh, acb);
1452    }
1453
1454    return &acb->common;
1455}
1456
1457static void blk_aio_read_entry(void *opaque)
1458{
1459    BlkAioEmAIOCB *acb = opaque;
1460    BlkRwCo *rwco = &acb->rwco;
1461    QEMUIOVector *qiov = rwco->iobuf;
1462
1463    assert(qiov->size == acb->bytes);
1464    rwco->ret = blk_do_preadv(rwco->blk, rwco->offset, acb->bytes,
1465                              qiov, rwco->flags);
1466    blk_aio_complete(acb);
1467}
1468
1469static void blk_aio_write_entry(void *opaque)
1470{
1471    BlkAioEmAIOCB *acb = opaque;
1472    BlkRwCo *rwco = &acb->rwco;
1473    QEMUIOVector *qiov = rwco->iobuf;
1474
1475    assert(!qiov || qiov->size == acb->bytes);
1476    rwco->ret = blk_do_pwritev_part(rwco->blk, rwco->offset, acb->bytes,
1477                                    qiov, 0, rwco->flags);
1478    blk_aio_complete(acb);
1479}
1480
1481BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
1482                                  int count, BdrvRequestFlags flags,
1483                                  BlockCompletionFunc *cb, void *opaque)
1484{
1485    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
1486                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
1487}
1488
1489int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
1490{
1491    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
1492    if (ret < 0) {
1493        return ret;
1494    }
1495    return count;
1496}
1497
1498int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
1499               BdrvRequestFlags flags)
1500{
1501    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
1502                      flags);
1503    if (ret < 0) {
1504        return ret;
1505    }
1506    return count;
1507}
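
/*
 * Sketch of assumed usage of the synchronous helpers above: read a 512-byte
 * header and write it back unchanged; both return the byte count on success
 * or a negative errno value:
 *
 *     uint8_t buf[512];
 *
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         ...
 *     }
 *     if (blk_pwrite(blk, 0, buf, sizeof(buf), 0) < 0) {
 *         ...
 *     }
 */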
1508
1509int64_t blk_getlength(BlockBackend *blk)
1510{
1511    if (!blk_is_available(blk)) {
1512        return -ENOMEDIUM;
1513    }
1514
1515    return bdrv_getlength(blk_bs(blk));
1516}
1517
1518void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
1519{
1520    if (!blk_bs(blk)) {
1521        *nb_sectors_ptr = 0;
1522    } else {
1523        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
1524    }
1525}
1526
1527int64_t blk_nb_sectors(BlockBackend *blk)
1528{
1529    if (!blk_is_available(blk)) {
1530        return -ENOMEDIUM;
1531    }
1532
1533    return bdrv_nb_sectors(blk_bs(blk));
1534}
1535
1536BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
1537                           QEMUIOVector *qiov, BdrvRequestFlags flags,
1538                           BlockCompletionFunc *cb, void *opaque)
1539{
1540    return blk_aio_prwv(blk, offset, qiov->size, qiov,
1541                        blk_aio_read_entry, flags, cb, opaque);
1542}
1543
1544BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
1545                            QEMUIOVector *qiov, BdrvRequestFlags flags,
1546                            BlockCompletionFunc *cb, void *opaque)
1547{
1548    return blk_aio_prwv(blk, offset, qiov->size, qiov,
1549                        blk_aio_write_entry, flags, cb, opaque);
1550}
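
/*
 * Sketch of assumed usage of the AIO helpers above; my_read_done and
 * MyRequest are hypothetical:
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         MyRequest *req = opaque;
 *
 *         ... complete req; ret is 0 on success, negative errno on error ...
 *     }
 *
 *     blk_aio_preadv(blk, req->offset, &req->qiov, 0, my_read_done, req);
 */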
1551
1552void blk_aio_cancel(BlockAIOCB *acb)
1553{
1554    bdrv_aio_cancel(acb);
1555}
1556
1557void blk_aio_cancel_async(BlockAIOCB *acb)
1558{
1559    bdrv_aio_cancel_async(acb);
1560}
1561
1562/* To be called between exactly one pair of blk_inc/dec_in_flight() */
1563static int coroutine_fn
1564blk_do_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
1565{
1566    blk_wait_while_drained(blk);
1567
1568    if (!blk_is_available(blk)) {
1569        return -ENOMEDIUM;
1570    }
1571
1572    return bdrv_co_ioctl(blk_bs(blk), req, buf);
1573}
1574
1575static void blk_ioctl_entry(void *opaque)
1576{
1577    BlkRwCo *rwco = opaque;
1578    QEMUIOVector *qiov = rwco->iobuf;
1579
1580    rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, qiov->iov[0].iov_base);
1581    aio_wait_kick();
1582}
1583
1584int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
1585{
1586    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
1587}
1588
1589static void blk_aio_ioctl_entry(void *opaque)
1590{
1591    BlkAioEmAIOCB *acb = opaque;
1592    BlkRwCo *rwco = &acb->rwco;
1593
1594    rwco->ret = blk_do_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
1595
1596    blk_aio_complete(acb);
1597}
1598
1599BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
1600                          BlockCompletionFunc *cb, void *opaque)
1601{
1602    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
1603}
1604
1605/* To be called between exactly one pair of blk_inc/dec_in_flight() */
1606static int coroutine_fn
1607blk_do_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
1608{
1609    int ret;
1610
1611    blk_wait_while_drained(blk);
1612
1613    ret = blk_check_byte_request(blk, offset, bytes);
1614    if (ret < 0) {
1615        return ret;
1616    }
1617
1618    return bdrv_co_pdiscard(blk->root, offset, bytes);
1619}
1620
1621static void blk_aio_pdiscard_entry(void *opaque)
1622{
1623    BlkAioEmAIOCB *acb = opaque;
1624    BlkRwCo *rwco = &acb->rwco;
1625
1626    rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, acb->bytes);
1627    blk_aio_complete(acb);
1628}
1629
1630BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
1631                             int64_t offset, int bytes,
1632                             BlockCompletionFunc *cb, void *opaque)
1633{
1634    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
1635                        cb, opaque);
1636}
1637
1638int coroutine_fn blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
1639{
1640    int ret;
1641
1642    blk_inc_in_flight(blk);
1643    ret = blk_do_pdiscard(blk, offset, bytes);
1644    blk_dec_in_flight(blk);
1645
1646    return ret;
1647}
1648
1649static void blk_pdiscard_entry(void *opaque)
1650{
1651    BlkRwCo *rwco = opaque;
1652    QEMUIOVector *qiov = rwco->iobuf;
1653
1654    rwco->ret = blk_do_pdiscard(rwco->blk, rwco->offset, qiov->size);
1655    aio_wait_kick();
1656}
1657
1658int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
1659{
1660    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
1661}
1662
1663/* To be called between exactly one pair of blk_inc/dec_in_flight() */
1664static int coroutine_fn blk_do_flush(BlockBackend *blk)
1665{
1666    blk_wait_while_drained(blk);
1667
1668    if (!blk_is_available(blk)) {
1669        return -ENOMEDIUM;
1670    }
1671
1672    return bdrv_co_flush(blk_bs(blk));
1673}
1674
1675static void blk_aio_flush_entry(void *opaque)
1676{
1677    BlkAioEmAIOCB *acb = opaque;
1678    BlkRwCo *rwco = &acb->rwco;
1679
1680    rwco->ret = blk_do_flush(rwco->blk);
1681    blk_aio_complete(acb);
1682}
1683
1684BlockAIOCB *blk_aio_flush(BlockBackend *blk,
1685                          BlockCompletionFunc *cb, void *opaque)
1686{
1687    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
1688}
1689
1690int coroutine_fn blk_co_flush(BlockBackend *blk)
1691{
1692    int ret;
1693
1694    blk_inc_in_flight(blk);
1695    ret = blk_do_flush(blk);
1696    blk_dec_in_flight(blk);
1697
1698    return ret;
1699}
1700
1701static void blk_flush_entry(void *opaque)
1702{
1703    BlkRwCo *rwco = opaque;
1704    rwco->ret = blk_do_flush(rwco->blk);
1705    aio_wait_kick();
1706}
1707
1708int blk_flush(BlockBackend *blk)
1709{
1710    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
1711}
1712
1713void blk_drain(BlockBackend *blk)
1714{
1715    BlockDriverState *bs = blk_bs(blk);
1716
1717    if (bs) {
1718        bdrv_drained_begin(bs);
1719    }
1720
1721    /* We may have -ENOMEDIUM completions in flight */
1722    AIO_WAIT_WHILE(blk_get_aio_context(blk),
1723                   qatomic_mb_read(&blk->in_flight) > 0);
1724
1725    if (bs) {
1726        bdrv_drained_end(bs);
1727    }
1728}
1729
1730void blk_drain_all(void)
1731{
1732    BlockBackend *blk = NULL;
1733
1734    bdrv_drain_all_begin();
1735
1736    while ((blk = blk_all_next(blk)) != NULL) {
1737        AioContext *ctx = blk_get_aio_context(blk);
1738
1739        aio_context_acquire(ctx);
1740
1741        /* We may have -ENOMEDIUM completions in flight */
1742        AIO_WAIT_WHILE(ctx, qatomic_mb_read(&blk->in_flight) > 0);
1743
1744        aio_context_release(ctx);
1745    }
1746
1747    bdrv_drain_all_end();
1748}
1749
1750void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
1751                      BlockdevOnError on_write_error)
1752{
1753    blk->on_read_error = on_read_error;
1754    blk->on_write_error = on_write_error;
1755}
1756
1757BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
1758{
1759    return is_read ? blk->on_read_error : blk->on_write_error;
1760}
1761
1762BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
1763                                      int error)
1764{
1765    BlockdevOnError on_err = blk_get_on_error(blk, is_read);
1766
1767    switch (on_err) {
1768    case BLOCKDEV_ON_ERROR_ENOSPC:
1769        return (error == ENOSPC) ?
1770               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
1771    case BLOCKDEV_ON_ERROR_STOP:
1772        return BLOCK_ERROR_ACTION_STOP;
1773    case BLOCKDEV_ON_ERROR_REPORT:
1774        return BLOCK_ERROR_ACTION_REPORT;
1775    case BLOCKDEV_ON_ERROR_IGNORE:
1776        return BLOCK_ERROR_ACTION_IGNORE;
1777    case BLOCKDEV_ON_ERROR_AUTO:
1778    default:
1779        abort();
1780    }
1781}
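
/*
 * Sketch of the assumed device-model flow on an I/O error, where errno_val
 * is the positive errno describing the failure (i.e. -ret for a negative
 * request return value ret):
 *
 *     BlockErrorAction action = blk_get_error_action(blk, is_read, errno_val);
 *
 *     blk_error_action(blk, action, is_read, errno_val);
 *     if (action == BLOCK_ERROR_ACTION_STOP) {
 *         ... keep the request around and retry it after the VM resumes ...
 *     }
 */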
1782
1783static void send_qmp_error_event(BlockBackend *blk,
1784                                 BlockErrorAction action,
1785                                 bool is_read, int error)
1786{
1787    IoOperationType optype;
1788    BlockDriverState *bs = blk_bs(blk);
1789
1790    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
1791    qapi_event_send_block_io_error(blk_name(blk), !!bs,
1792                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
1793                                   action, blk_iostatus_is_enabled(blk),
1794                                   error == ENOSPC, strerror(error));
1795}
1796
1797/* This is done by device models because, while the block layer knows
1798 * about the error, it does not know whether an operation comes from
1799 * the device or the block layer (from a job, for example).
1800 */
1801void blk_error_action(BlockBackend *blk, BlockErrorAction action,
1802                      bool is_read, int error)
1803{
1804    assert(error >= 0);
1805
1806    if (action == BLOCK_ERROR_ACTION_STOP) {
1807        /* First set the iostatus, so that "info block" returns an iostatus
1808         * that matches the events raised so far (an additional error iostatus
1809         * is fine, but not a lost one).
1810         */
1811        blk_iostatus_set_err(blk, error);
1812
1813        /* Then raise the request to stop the VM and the event.
1814         * qemu_system_vmstop_request_prepare has two effects.  First,
1815         * it ensures that the STOP event always comes after the
1816         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
1817         * can observe the STOP event and do a "cont" before the STOP
1818         * event is issued, the VM will not stop.  In this case, vm_start()
1819         * also ensures that the STOP/RESUME pair of events is emitted.
1820         */
1821        qemu_system_vmstop_request_prepare();
1822        send_qmp_error_event(blk, action, is_read, error);
1823        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
1824    } else {
1825        send_qmp_error_event(blk, action, is_read, error);
1826    }
1827}
1828
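/*
 * Usage sketch (hypothetical device model): a request completion path
 * typically combines blk_get_error_action() and blk_error_action().  The
 * "req" structure, its is_read flag and the retry/report steps below are
 * illustrative only:
 *
 *     if (ret < 0) {
 *         BlockErrorAction action =
 *             blk_get_error_action(blk, req->is_read, -ret);
 *         blk_error_action(blk, action, req->is_read, -ret);
 *         if (action == BLOCK_ERROR_ACTION_STOP) {
 *             // keep the request around and retry it once the VM resumes
 *         } else if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             // complete the request with an error status for the guest
 *         } else {
 *             // BLOCK_ERROR_ACTION_IGNORE: complete it as if it succeeded
 *         }
 *     }
 */
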
1829/*
1830 * Returns true if the BlockBackend can support taking write permissions
1831 * (because its root node is not read-only).
1832 */
1833bool blk_supports_write_perm(BlockBackend *blk)
1834{
1835    BlockDriverState *bs = blk_bs(blk);
1836
1837    if (bs) {
1838        return !bdrv_is_read_only(bs);
1839    } else {
1840        return !blk->root_state.read_only;
1841    }
1842}
1843
1844/*
1845 * Returns true if the BlockBackend can be written to in its current
1846 * configuration (i.e. if write permission has been requested)
1847 */
1848bool blk_is_writable(BlockBackend *blk)
1849{
1850    return blk->perm & BLK_PERM_WRITE;
1851}
1852
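/*
 * Usage sketch: blk_supports_write_perm() asks "could this backend be made
 * writable at all?", while blk_is_writable() asks "does it currently hold
 * BLK_PERM_WRITE?".  A hypothetical caller might distinguish them like
 * this (blk_set_perm() is defined earlier in this file):
 *
 *     if (!blk_supports_write_perm(blk)) {
 *         // root node is read-only: expose the device read-only
 *     } else if (!blk_is_writable(blk)) {
 *         // writable in principle, but BLK_PERM_WRITE has not been
 *         // requested yet; blk_set_perm() would have to grant it first
 *     }
 */
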
1853bool blk_is_sg(BlockBackend *blk)
1854{
1855    BlockDriverState *bs = blk_bs(blk);
1856
1857    if (!bs) {
1858        return false;
1859    }
1860
1861    return bdrv_is_sg(bs);
1862}
1863
1864bool blk_enable_write_cache(BlockBackend *blk)
1865{
1866    return blk->enable_write_cache;
1867}
1868
1869void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
1870{
1871    blk->enable_write_cache = wce;
1872}
1873
1874void blk_invalidate_cache(BlockBackend *blk, Error **errp)
1875{
1876    BlockDriverState *bs = blk_bs(blk);
1877
1878    if (!bs) {
1879        error_setg(errp, "Device '%s' has no medium", blk->name);
1880        return;
1881    }
1882
1883    bdrv_invalidate_cache(bs, errp);
1884}
1885
1886bool blk_is_inserted(BlockBackend *blk)
1887{
1888    BlockDriverState *bs = blk_bs(blk);
1889
1890    return bs && bdrv_is_inserted(bs);
1891}
1892
1893bool blk_is_available(BlockBackend *blk)
1894{
1895    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
1896}
1897
1898void blk_lock_medium(BlockBackend *blk, bool locked)
1899{
1900    BlockDriverState *bs = blk_bs(blk);
1901
1902    if (bs) {
1903        bdrv_lock_medium(bs, locked);
1904    }
1905}
1906
1907void blk_eject(BlockBackend *blk, bool eject_flag)
1908{
1909    BlockDriverState *bs = blk_bs(blk);
1910    char *id;
1911
1912    if (bs) {
1913        bdrv_eject(bs, eject_flag);
1914    }
1915
1916    /* Whether or not we ejected on the backend,
1917     * the frontend experienced a tray event. */
1918    id = blk_get_attached_dev_id(blk);
1919    qapi_event_send_device_tray_moved(blk_name(blk), id,
1920                                      eject_flag);
1921    g_free(id);
1922}
1923
1924int blk_get_flags(BlockBackend *blk)
1925{
1926    BlockDriverState *bs = blk_bs(blk);
1927
1928    if (bs) {
1929        return bdrv_get_flags(bs);
1930    } else {
1931        return blk->root_state.open_flags;
1932    }
1933}
1934
1935/* Returns the minimum request alignment, in bytes; guaranteed nonzero */
1936uint32_t blk_get_request_alignment(BlockBackend *blk)
1937{
1938    BlockDriverState *bs = blk_bs(blk);
1939    return bs ? bs->bl.request_alignment : BDRV_SECTOR_SIZE;
1940}
1941
1942/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
1943uint32_t blk_get_max_transfer(BlockBackend *blk)
1944{
1945    BlockDriverState *bs = blk_bs(blk);
1946    uint32_t max = 0;
1947
1948    if (bs) {
1949        max = bs->bl.max_transfer;
1950    }
1951    return MIN_NON_ZERO(max, INT_MAX);
1952}
1953
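/*
 * Usage sketch: device models usually derive their advertised limits from
 * the two helpers above.  A request-size upper bound that respects both
 * values could be computed as follows:
 *
 *     uint32_t align = blk_get_request_alignment(blk);
 *     uint32_t max_bytes = QEMU_ALIGN_DOWN(blk_get_max_transfer(blk), align);
 *     // max_bytes is a multiple of the required alignment; both helpers
 *     // guarantee nonzero return values
 */
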
1954int blk_get_max_iov(BlockBackend *blk)
1955{
1956    return blk->root->bs->bl.max_iov;
1957}
1958
1959void blk_set_guest_block_size(BlockBackend *blk, int align)
1960{
1961    blk->guest_block_size = align;
1962}
1963
1964void *blk_try_blockalign(BlockBackend *blk, size_t size)
1965{
1966    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
1967}
1968
1969void *blk_blockalign(BlockBackend *blk, size_t size)
1970{
1971    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
1972}
1973
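/*
 * Usage sketch: bounce buffers for I/O on this backend are allocated with
 * one of the helpers above and released with qemu_vfree():
 *
 *     void *buf = blk_try_blockalign(blk, len);
 *     if (!buf) {
 *         // handle the allocation failure; note that blk_blockalign()
 *         // aborts on failure instead of returning NULL
 *     }
 *     ...
 *     qemu_vfree(buf);
 */
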
1974bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
1975{
1976    BlockDriverState *bs = blk_bs(blk);
1977
1978    if (!bs) {
1979        return false;
1980    }
1981
1982    return bdrv_op_is_blocked(bs, op, errp);
1983}
1984
1985void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
1986{
1987    BlockDriverState *bs = blk_bs(blk);
1988
1989    if (bs) {
1990        bdrv_op_unblock(bs, op, reason);
1991    }
1992}
1993
1994void blk_op_block_all(BlockBackend *blk, Error *reason)
1995{
1996    BlockDriverState *bs = blk_bs(blk);
1997
1998    if (bs) {
1999        bdrv_op_block_all(bs, reason);
2000    }
2001}
2002
2003void blk_op_unblock_all(BlockBackend *blk, Error *reason)
2004{
2005    BlockDriverState *bs = blk_bs(blk);
2006
2007    if (bs) {
2008        bdrv_op_unblock_all(bs, reason);
2009    }
2010}
2011
2012AioContext *blk_get_aio_context(BlockBackend *blk)
2013{
2014    BlockDriverState *bs = blk_bs(blk);
2015
2016    if (bs) {
2017        AioContext *ctx = bdrv_get_aio_context(blk_bs(blk));
2018        assert(ctx == blk->ctx);
2019    }
2020
2021    return blk->ctx;
2022}
2023
2024static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
2025{
2026    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
2027    return blk_get_aio_context(blk_acb->blk);
2028}
2029
2030static int blk_do_set_aio_context(BlockBackend *blk, AioContext *new_context,
2031                                  bool update_root_node, Error **errp)
2032{
2033    BlockDriverState *bs = blk_bs(blk);
2034    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
2035    int ret;
2036
2037    if (bs) {
2038        if (update_root_node) {
2039            ret = bdrv_child_try_set_aio_context(bs, new_context, blk->root,
2040                                                 errp);
2041            if (ret < 0) {
2042                return ret;
2043            }
2044        }
2045        if (tgm->throttle_state) {
2046            bdrv_drained_begin(bs);
2047            throttle_group_detach_aio_context(tgm);
2048            throttle_group_attach_aio_context(tgm, new_context);
2049            bdrv_drained_end(bs);
2050        }
2051    }
2052
2053    blk->ctx = new_context;
2054    return 0;
2055}
2056
2057int blk_set_aio_context(BlockBackend *blk, AioContext *new_context,
2058                        Error **errp)
2059{
2060    return blk_do_set_aio_context(blk, new_context, true, errp);
2061}
2062
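/*
 * Usage sketch: moving a backend to an IOThread's AioContext, assuming an
 * IOThread object "iothread" obtained elsewhere (for example via the
 * -object iothread,... command line option):
 *
 *     AioContext *new_ctx = iothread_get_aio_context(iothread);
 *     if (blk_set_aio_context(blk, new_ctx, errp) < 0) {
 *         // the root node or one of its other users refused the change
 *     }
 */
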
2063static bool blk_root_can_set_aio_ctx(BdrvChild *child, AioContext *ctx,
2064                                     GSList **ignore, Error **errp)
2065{
2066    BlockBackend *blk = child->opaque;
2067
2068    if (blk->allow_aio_context_change) {
2069        return true;
2070    }
2071
2072    /* Only manually created BlockBackends that are not attached to anything
2073     * can change their AioContext without updating their user. */
2074    if (!blk->name || blk->dev) {
2075        /* TODO Add BB name/QOM path */
2076        error_setg(errp, "Cannot change iothread of active block backend");
2077        return false;
2078    }
2079
2080    return true;
2081}
2082
2083static void blk_root_set_aio_ctx(BdrvChild *child, AioContext *ctx,
2084                                 GSList **ignore)
2085{
2086    BlockBackend *blk = child->opaque;
2087    blk_do_set_aio_context(blk, ctx, false, &error_abort);
2088}
2089
2090void blk_add_aio_context_notifier(BlockBackend *blk,
2091        void (*attached_aio_context)(AioContext *new_context, void *opaque),
2092        void (*detach_aio_context)(void *opaque), void *opaque)
2093{
2094    BlockBackendAioNotifier *notifier;
2095    BlockDriverState *bs = blk_bs(blk);
2096
2097    notifier = g_new(BlockBackendAioNotifier, 1);
2098    notifier->attached_aio_context = attached_aio_context;
2099    notifier->detach_aio_context = detach_aio_context;
2100    notifier->opaque = opaque;
2101    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);
2102
2103    if (bs) {
2104        bdrv_add_aio_context_notifier(bs, attached_aio_context,
2105                                      detach_aio_context, opaque);
2106    }
2107}
2108
2109void blk_remove_aio_context_notifier(BlockBackend *blk,
2110                                     void (*attached_aio_context)(AioContext *,
2111                                                                  void *),
2112                                     void (*detach_aio_context)(void *),
2113                                     void *opaque)
2114{
2115    BlockBackendAioNotifier *notifier;
2116    BlockDriverState *bs = blk_bs(blk);
2117
2118    if (bs) {
2119        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
2120                                         detach_aio_context, opaque);
2121    }
2122
2123    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
2124        if (notifier->attached_aio_context == attached_aio_context &&
2125            notifier->detach_aio_context == detach_aio_context &&
2126            notifier->opaque == opaque) {
2127            QLIST_REMOVE(notifier, list);
2128            g_free(notifier);
2129            return;
2130        }
2131    }
2132
2133    abort();
2134}
2135
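/*
 * Usage sketch: a user that caches the backend's AioContext can keep its
 * cache coherent with a notifier pair.  "MyState" and the callbacks below
 * are hypothetical; the removal call must pass exactly the same triple
 * that was registered, otherwise the function above aborts:
 *
 *     static void my_attached_aio_context(AioContext *ctx, void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->ctx = ctx;
 *     }
 *
 *     static void my_detach_aio_context(void *opaque)
 *     {
 *         MyState *s = opaque;
 *         s->ctx = NULL;
 *     }
 *
 *     blk_add_aio_context_notifier(blk, my_attached_aio_context,
 *                                  my_detach_aio_context, s);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attached_aio_context,
 *                                     my_detach_aio_context, s);
 */
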
2136void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
2137{
2138    notifier_list_add(&blk->remove_bs_notifiers, notify);
2139}
2140
2141void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
2142{
2143    notifier_list_add(&blk->insert_bs_notifiers, notify);
2144}
2145
2146void blk_io_plug(BlockBackend *blk)
2147{
2148    BlockDriverState *bs = blk_bs(blk);
2149
2150    if (bs) {
2151        bdrv_io_plug(bs);
2152    }
2153}
2154
2155void blk_io_unplug(BlockBackend *blk)
2156{
2157    BlockDriverState *bs = blk_bs(blk);
2158
2159    if (bs) {
2160        bdrv_io_unplug(bs);
2161    }
2162}
2163
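/*
 * Usage sketch: batching several asynchronous submissions between plug and
 * unplug lets the underlying driver coalesce them (e.g. into a single
 * io_submit() for Linux AIO).  The request queue walk below is
 * hypothetical:
 *
 *     blk_io_plug(blk);
 *     while ((req = pop_next_request(queue)) != NULL) {
 *         blk_aio_pwritev(blk, req->offset, &req->qiov, 0,
 *                         my_write_complete, req);
 *     }
 *     blk_io_unplug(blk);
 */
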
2164BlockAcctStats *blk_get_stats(BlockBackend *blk)
2165{
2166    return &blk->stats;
2167}
2168
2169void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
2170                  BlockCompletionFunc *cb, void *opaque)
2171{
2172    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
2173}
2174
2175int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
2176                                      int bytes, BdrvRequestFlags flags)
2177{
2178    return blk_co_pwritev(blk, offset, bytes, NULL,
2179                          flags | BDRV_REQ_ZERO_WRITE);
2180}
2181
2182int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
2183                          int count)
2184{
2185    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
2186                   BDRV_REQ_WRITE_COMPRESSED);
2187}
2188
2189int blk_truncate(BlockBackend *blk, int64_t offset, bool exact,
2190                 PreallocMode prealloc, BdrvRequestFlags flags, Error **errp)
2191{
2192    if (!blk_is_available(blk)) {
2193        error_setg(errp, "No medium inserted");
2194        return -ENOMEDIUM;
2195    }
2196
2197    return bdrv_truncate(blk->root, offset, exact, prealloc, flags, errp);
2198}
2199
2200int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
2201                     int64_t pos, int size)
2202{
2203    int ret;
2204
2205    if (!blk_is_available(blk)) {
2206        return -ENOMEDIUM;
2207    }
2208
2209    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
2210    if (ret < 0) {
2211        return ret;
2212    }
2213
2214    if (ret == size && !blk->enable_write_cache) {
2215        ret = bdrv_flush(blk_bs(blk));
2216    }
2217
2218    return ret < 0 ? ret : size;
2219}
2220
2221int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
2222{
2223    if (!blk_is_available(blk)) {
2224        return -ENOMEDIUM;
2225    }
2226
2227    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
2228}
2229
2230int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
2231{
2232    if (!blk_is_available(blk)) {
2233        return -ENOMEDIUM;
2234    }
2235
2236    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
2237}
2238
2239int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
2240{
2241    if (!blk_is_available(blk)) {
2242        return -ENOMEDIUM;
2243    }
2244
2245    return bdrv_probe_geometry(blk_bs(blk), geo);
2246}
2247
2248/*
2249 * Updates the BlockBackendRootState object with data from the currently
2250 * attached BlockDriverState.
2251 */
2252void blk_update_root_state(BlockBackend *blk)
2253{
2254    assert(blk->root);
2255
2256    blk->root_state.open_flags    = blk->root->bs->open_flags;
2257    blk->root_state.read_only     = blk->root->bs->read_only;
2258    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
2259}
2260
2261/*
2262 * Returns the detect-zeroes setting to be used for bdrv_open() of a
2263 * BlockDriverState which is supposed to inherit the root state.
2264 */
2265bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
2266{
2267    return blk->root_state.detect_zeroes;
2268}
2269
2270/*
2271 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
2272 * supposed to inherit the root state.
2273 */
2274int blk_get_open_flags_from_root_state(BlockBackend *blk)
2275{
2276    int bs_flags;
2277
2278    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
2279    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;
2280
2281    return bs_flags;
2282}
2283
2284BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
2285{
2286    return &blk->root_state;
2287}
2288
2289int blk_commit_all(void)
2290{
2291    BlockBackend *blk = NULL;
2292
2293    while ((blk = blk_all_next(blk)) != NULL) {
2294        AioContext *aio_context = blk_get_aio_context(blk);
2295        BlockDriverState *unfiltered_bs = bdrv_skip_filters(blk_bs(blk));
2296
2297        aio_context_acquire(aio_context);
2298        if (blk_is_inserted(blk) && bdrv_cow_child(unfiltered_bs)) {
2299            int ret;
2300
2301            ret = bdrv_commit(unfiltered_bs);
2302            if (ret < 0) {
2303                aio_context_release(aio_context);
2304                return ret;
2305            }
2306        }
2307        aio_context_release(aio_context);
2308    }
2309    return 0;
2310}
2311
2312
2313/* throttling disk I/O limits */
2314void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
2315{
2316    throttle_group_config(&blk->public.throttle_group_member, cfg);
2317}
2318
2319void blk_io_limits_disable(BlockBackend *blk)
2320{
2321    BlockDriverState *bs = blk_bs(blk);
2322    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
2323    assert(tgm->throttle_state);
2324    if (bs) {
2325        bdrv_drained_begin(bs);
2326    }
2327    throttle_group_unregister_tgm(tgm);
2328    if (bs) {
2329        bdrv_drained_end(bs);
2330    }
2331}
2332
2333/* Should be called before blk_set_io_limits() if a limit is to be set */
2334void blk_io_limits_enable(BlockBackend *blk, const char *group)
2335{
2336    assert(!blk->public.throttle_group_member.throttle_state);
2337    throttle_group_register_tgm(&blk->public.throttle_group_member,
2338                                group, blk_get_aio_context(blk));
2339}
2340
2341void blk_io_limits_update_group(BlockBackend *blk, const char *group)
2342{
2343    /* this BB is not part of any group */
2344    if (!blk->public.throttle_group_member.throttle_state) {
2345        return;
2346    }
2347
2348    /* this BB is part of the same group as the one we want */
2349    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
2350                group)) {
2351        return;
2352    }
2353
2354    /* need to change the group this bs belongs to */
2355    blk_io_limits_disable(blk);
2356    blk_io_limits_enable(blk, group);
2357}
2358
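/*
 * Usage sketch: enabling throttling and applying limits; the group name
 * and the 100 MB/s figure are arbitrary:
 *
 *     ThrottleConfig cfg;
 *
 *     blk_io_limits_enable(blk, "group0");
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_BPS_TOTAL].avg = 100 * 1024 * 1024;
 *     blk_set_io_limits(blk, &cfg);
 *
 *     // later, to move the backend to a different throttle group:
 *     blk_io_limits_update_group(blk, "group1");
 */
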
2359static void blk_root_drained_begin(BdrvChild *child)
2360{
2361    BlockBackend *blk = child->opaque;
2362    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
2363
2364    if (++blk->quiesce_counter == 1) {
2365        if (blk->dev_ops && blk->dev_ops->drained_begin) {
2366            blk->dev_ops->drained_begin(blk->dev_opaque);
2367        }
2368    }
2369
2370    /* Note that blk->root may not be accessible here yet if we are just
2371     * attaching to a BlockDriverState that is drained. Use child instead. */
2372
2373    if (qatomic_fetch_inc(&tgm->io_limits_disabled) == 0) {
2374        throttle_group_restart_tgm(tgm);
2375    }
2376}
2377
2378static bool blk_root_drained_poll(BdrvChild *child)
2379{
2380    BlockBackend *blk = child->opaque;
2381    assert(blk->quiesce_counter);
2382    return !!blk->in_flight;
2383}
2384
2385static void blk_root_drained_end(BdrvChild *child, int *drained_end_counter)
2386{
2387    BlockBackend *blk = child->opaque;
2388    assert(blk->quiesce_counter);
2389
2390    assert(blk->public.throttle_group_member.io_limits_disabled);
2391    qatomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
2392
2393    if (--blk->quiesce_counter == 0) {
2394        if (blk->dev_ops && blk->dev_ops->drained_end) {
2395            blk->dev_ops->drained_end(blk->dev_opaque);
2396        }
2397        while (qemu_co_enter_next(&blk->queued_requests, NULL)) {
2398            /* Resume all queued requests */
2399        }
2400    }
2401}
2402
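/*
 * Usage sketch: a device model that must pause its own request sources
 * while the backend is drained can provide drained_begin/drained_end
 * callbacks in its BlockDevOps.  The ops structure and callbacks are
 * hypothetical; both callbacks receive the dev_opaque pointer:
 *
 *     static const BlockDevOps my_block_ops = {
 *         .drained_begin = my_dev_drained_begin,  // stop issuing requests
 *         .drained_end   = my_dev_drained_end,    // resume issuing requests
 *     };
 *
 *     blk_set_dev_ops(blk, &my_block_ops, dev);
 */
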
2403void blk_register_buf(BlockBackend *blk, void *host, size_t size)
2404{
2405    bdrv_register_buf(blk_bs(blk), host, size);
2406}
2407
2408void blk_unregister_buf(BlockBackend *blk, void *host)
2409{
2410    bdrv_unregister_buf(blk_bs(blk), host);
2411}
2412
2413int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
2414                                   BlockBackend *blk_out, int64_t off_out,
2415                                   int bytes, BdrvRequestFlags read_flags,
2416                                   BdrvRequestFlags write_flags)
2417{
2418    int r;
2419    r = blk_check_byte_request(blk_in, off_in, bytes);
2420    if (r) {
2421        return r;
2422    }
2423    r = blk_check_byte_request(blk_out, off_out, bytes);
2424    if (r) {
2425        return r;
2426    }
2427    return bdrv_co_copy_range(blk_in->root, off_in,
2428                              blk_out->root, off_out,
2429                              bytes, read_flags, write_flags);
2430}
2431
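/*
 * Usage sketch: offloading a copy between two backends from coroutine
 * context.  The wrapper below is hypothetical; offsets and length must lie
 * within both devices and the caller must hold the required permissions:
 *
 *     static int coroutine_fn my_copy(BlockBackend *src, BlockBackend *dst,
 *                                     int64_t off, int bytes)
 *     {
 *         return blk_co_copy_range(src, off, dst, off, bytes, 0, 0);
 *     }
 */
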
2432const BdrvChild *blk_root(BlockBackend *blk)
2433{
2434    return blk->root;
2435}
2436
2437int blk_make_empty(BlockBackend *blk, Error **errp)
2438{
2439    if (!blk_is_available(blk)) {
2440        error_setg(errp, "No medium inserted");
2441        return -ENOMEDIUM;
2442    }
2443
2444    return bdrv_make_empty(blk->root, errp);
2445}
2446