qemu/block/block-backend.c
   1/*
   2 * QEMU Block backends
   3 *
   4 * Copyright (C) 2014-2016 Red Hat, Inc.
   5 *
   6 * Authors:
   7 *  Markus Armbruster <armbru@redhat.com>,
   8 *
   9 * This work is licensed under the terms of the GNU LGPL, version 2.1
  10 * or later.  See the COPYING.LIB file in the top-level directory.
  11 */
  12
  13#include "qemu/osdep.h"
  14#include "sysemu/block-backend.h"
  15#include "block/block_int.h"
  16#include "block/blockjob.h"
  17#include "block/throttle-groups.h"
  18#include "sysemu/blockdev.h"
  19#include "sysemu/sysemu.h"
  20#include "qapi/error.h"
  21#include "qapi/qapi-events-block.h"
  22#include "qemu/id.h"
  23#include "qemu/option.h"
  24#include "trace.h"
  25#include "migration/misc.h"
  26
  27/* Number of coroutines to reserve per attached device model */
  28#define COROUTINE_POOL_RESERVATION 64
  29
  30#define NOT_DONE 0x7fffffff /* used while emulated sync operation in progress */
  31
  32static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb);
  33
  34typedef struct BlockBackendAioNotifier {
  35    void (*attached_aio_context)(AioContext *new_context, void *opaque);
  36    void (*detach_aio_context)(void *opaque);
  37    void *opaque;
  38    QLIST_ENTRY(BlockBackendAioNotifier) list;
  39} BlockBackendAioNotifier;
  40
  41struct BlockBackend {
  42    char *name;
  43    int refcnt;
  44    BdrvChild *root;
  45    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
  46    QTAILQ_ENTRY(BlockBackend) link;         /* for block_backends */
  47    QTAILQ_ENTRY(BlockBackend) monitor_link; /* for monitor_block_backends */
  48    BlockBackendPublic public;
  49
  50    void *dev;                  /* attached device model, if any */
  51    bool legacy_dev;            /* true if dev is not a DeviceState */
  52    /* TODO change to DeviceState when all users are qdevified */
  53    const BlockDevOps *dev_ops;
  54    void *dev_opaque;
  55
  56    /* the block size for which the guest device expects atomicity */
  57    int guest_block_size;
  58
  59    /* If the BDS tree is removed, some of its options are stored here (which
  60     * can be used to restore those options in the new BDS on insert) */
  61    BlockBackendRootState root_state;
  62
  63    bool enable_write_cache;
  64
  65    /* I/O stats (display with "info blockstats"). */
  66    BlockAcctStats stats;
  67
  68    BlockdevOnError on_read_error, on_write_error;
  69    bool iostatus_enabled;
  70    BlockDeviceIoStatus iostatus;
  71
  72    uint64_t perm;
  73    uint64_t shared_perm;
  74    bool disable_perm;
  75
  76    bool allow_write_beyond_eof;
  77
  78    NotifierList remove_bs_notifiers, insert_bs_notifiers;
  79    QLIST_HEAD(, BlockBackendAioNotifier) aio_notifiers;
  80
  81    int quiesce_counter;
  82    VMChangeStateEntry *vmsh;
  83    bool force_allow_inactivate;
  84
  85    /* Number of in-flight aio requests.  BlockDriverState also counts
  86     * in-flight requests but aio requests can exist even when blk->root is
  87     * NULL, so we cannot rely on its counter for that case.
  88     * Accessed with atomic ops.
  89     */
  90    unsigned int in_flight;
  91};
  92
  93typedef struct BlockBackendAIOCB {
  94    BlockAIOCB common;
  95    BlockBackend *blk;
  96    int ret;
  97} BlockBackendAIOCB;
  98
  99static const AIOCBInfo block_backend_aiocb_info = {
 100    .get_aio_context = blk_aiocb_get_aio_context,
 101    .aiocb_size = sizeof(BlockBackendAIOCB),
 102};
 103
 104static void drive_info_del(DriveInfo *dinfo);
 105static BlockBackend *bdrv_first_blk(BlockDriverState *bs);
 106
 107/* All BlockBackends */
 108static QTAILQ_HEAD(, BlockBackend) block_backends =
 109    QTAILQ_HEAD_INITIALIZER(block_backends);
 110
  111/* All BlockBackends referenced by the monitor; these are the backends
  112 * iterated over by blk_next() */
 113static QTAILQ_HEAD(, BlockBackend) monitor_block_backends =
 114    QTAILQ_HEAD_INITIALIZER(monitor_block_backends);
 115
 116static void blk_root_inherit_options(int *child_flags, QDict *child_options,
 117                                     int parent_flags, QDict *parent_options)
 118{
 119    /* We're not supposed to call this function for root nodes */
 120    abort();
 121}
 122static void blk_root_drained_begin(BdrvChild *child);
 123static bool blk_root_drained_poll(BdrvChild *child);
 124static void blk_root_drained_end(BdrvChild *child);
 125
 126static void blk_root_change_media(BdrvChild *child, bool load);
 127static void blk_root_resize(BdrvChild *child);
 128
 129static char *blk_root_get_parent_desc(BdrvChild *child)
 130{
 131    BlockBackend *blk = child->opaque;
 132    char *dev_id;
 133
 134    if (blk->name) {
 135        return g_strdup(blk->name);
 136    }
 137
 138    dev_id = blk_get_attached_dev_id(blk);
 139    if (*dev_id) {
 140        return dev_id;
 141    } else {
 142        /* TODO Callback into the BB owner for something more detailed */
 143        g_free(dev_id);
 144        return g_strdup("a block device");
 145    }
 146}
 147
 148static const char *blk_root_get_name(BdrvChild *child)
 149{
 150    return blk_name(child->opaque);
 151}
 152
 153static void blk_vm_state_changed(void *opaque, int running, RunState state)
 154{
 155    Error *local_err = NULL;
 156    BlockBackend *blk = opaque;
 157
 158    if (state == RUN_STATE_INMIGRATE) {
 159        return;
 160    }
 161
 162    qemu_del_vm_change_state_handler(blk->vmsh);
 163    blk->vmsh = NULL;
 164    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
 165    if (local_err) {
 166        error_report_err(local_err);
 167    }
 168}
 169
 170/*
 171 * Notifies the user of the BlockBackend that migration has completed. qdev
 172 * devices can tighten their permissions in response (specifically revoke
 173 * shared write permissions that we needed for storage migration).
 174 *
  175 * If an error is set through @errp, the VM must not be resumed.
 176 */
 177static void blk_root_activate(BdrvChild *child, Error **errp)
 178{
 179    BlockBackend *blk = child->opaque;
 180    Error *local_err = NULL;
 181
 182    if (!blk->disable_perm) {
 183        return;
 184    }
 185
 186    blk->disable_perm = false;
 187
 188    blk_set_perm(blk, blk->perm, BLK_PERM_ALL, &local_err);
 189    if (local_err) {
 190        error_propagate(errp, local_err);
 191        blk->disable_perm = true;
 192        return;
 193    }
 194
 195    if (runstate_check(RUN_STATE_INMIGRATE)) {
  196        /* Activation can happen when the migration process is still active, for
 197         * example when nbd_server_add is called during non-shared storage
 198         * migration. Defer the shared_perm update to migration completion. */
 199        if (!blk->vmsh) {
 200            blk->vmsh = qemu_add_vm_change_state_handler(blk_vm_state_changed,
 201                                                         blk);
 202        }
 203        return;
 204    }
 205
 206    blk_set_perm(blk, blk->perm, blk->shared_perm, &local_err);
 207    if (local_err) {
 208        error_propagate(errp, local_err);
 209        blk->disable_perm = true;
 210        return;
 211    }
 212}
 213
 214void blk_set_force_allow_inactivate(BlockBackend *blk)
 215{
 216    blk->force_allow_inactivate = true;
 217}
 218
 219static bool blk_can_inactivate(BlockBackend *blk)
 220{
 221    /* If it is a guest device, inactivate is ok. */
 222    if (blk->dev || blk_name(blk)[0]) {
 223        return true;
 224    }
 225
 226    /* Inactivating means no more writes to the image can be done,
 227     * even if those writes would be changes invisible to the
 228     * guest.  For block job BBs that satisfy this, we can just allow
 229     * it.  This is the case for mirror job source, which is required
 230     * by libvirt non-shared block migration. */
 231    if (!(blk->perm & (BLK_PERM_WRITE | BLK_PERM_WRITE_UNCHANGED))) {
 232        return true;
 233    }
 234
 235    return blk->force_allow_inactivate;
 236}
 237
 238static int blk_root_inactivate(BdrvChild *child)
 239{
 240    BlockBackend *blk = child->opaque;
 241
 242    if (blk->disable_perm) {
 243        return 0;
 244    }
 245
 246    if (!blk_can_inactivate(blk)) {
 247        return -EPERM;
 248    }
 249
 250    blk->disable_perm = true;
 251    if (blk->root) {
 252        bdrv_child_try_set_perm(blk->root, 0, BLK_PERM_ALL, &error_abort);
 253    }
 254
 255    return 0;
 256}
 257
 258static void blk_root_attach(BdrvChild *child)
 259{
 260    BlockBackend *blk = child->opaque;
 261    BlockBackendAioNotifier *notifier;
 262
 263    trace_blk_root_attach(child, blk, child->bs);
 264
 265    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
 266        bdrv_add_aio_context_notifier(child->bs,
 267                notifier->attached_aio_context,
 268                notifier->detach_aio_context,
 269                notifier->opaque);
 270    }
 271}
 272
 273static void blk_root_detach(BdrvChild *child)
 274{
 275    BlockBackend *blk = child->opaque;
 276    BlockBackendAioNotifier *notifier;
 277
 278    trace_blk_root_detach(child, blk, child->bs);
 279
 280    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
 281        bdrv_remove_aio_context_notifier(child->bs,
 282                notifier->attached_aio_context,
 283                notifier->detach_aio_context,
 284                notifier->opaque);
 285    }
 286}
 287
 288static const BdrvChildRole child_root = {
 289    .inherit_options    = blk_root_inherit_options,
 290
 291    .change_media       = blk_root_change_media,
 292    .resize             = blk_root_resize,
 293    .get_name           = blk_root_get_name,
 294    .get_parent_desc    = blk_root_get_parent_desc,
 295
 296    .drained_begin      = blk_root_drained_begin,
 297    .drained_poll       = blk_root_drained_poll,
 298    .drained_end        = blk_root_drained_end,
 299
 300    .activate           = blk_root_activate,
 301    .inactivate         = blk_root_inactivate,
 302
 303    .attach             = blk_root_attach,
 304    .detach             = blk_root_detach,
 305};
 306
 307/*
 308 * Create a new BlockBackend with a reference count of one.
 309 *
  310 * @perm is a bitmask of BLK_PERM_* constants which describes the permissions
 311 * to request for a block driver node that is attached to this BlockBackend.
 312 * @shared_perm is a bitmask which describes which permissions may be granted
 313 * to other users of the attached node.
 314 * Both sets of permissions can be changed later using blk_set_perm().
 315 *
 316 * Return the new BlockBackend on success, null on failure.
 317 */
 318BlockBackend *blk_new(uint64_t perm, uint64_t shared_perm)
 319{
 320    BlockBackend *blk;
 321
 322    blk = g_new0(BlockBackend, 1);
 323    blk->refcnt = 1;
 324    blk->perm = perm;
 325    blk->shared_perm = shared_perm;
 326    blk_set_enable_write_cache(blk, true);
 327
 328    blk->on_read_error = BLOCKDEV_ON_ERROR_REPORT;
 329    blk->on_write_error = BLOCKDEV_ON_ERROR_ENOSPC;
 330
 331    block_acct_init(&blk->stats);
 332
 333    notifier_list_init(&blk->remove_bs_notifiers);
 334    notifier_list_init(&blk->insert_bs_notifiers);
 335    QLIST_INIT(&blk->aio_notifiers);
 336
 337    QTAILQ_INSERT_TAIL(&block_backends, blk, link);
 338    return blk;
 339}
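
/*
 * Illustrative sketch, not code used by QEMU itself: a typical caller
 * creates a BlockBackend with the permissions it needs, attaches an
 * existing node with blk_insert_bs(), and drops its reference when done.
 * The node name "my-node", the bdrv_lookup_bs() lookup and the error
 * handling policy are assumptions made only for this example.
 *
 *     BlockBackend *blk;
 *     BlockDriverState *bs;
 *     Error *local_err = NULL;
 *
 *     blk = blk_new(BLK_PERM_CONSISTENT_READ | BLK_PERM_WRITE, BLK_PERM_ALL);
 *     bs = bdrv_lookup_bs(NULL, "my-node", &local_err);
 *     if (!bs || blk_insert_bs(blk, bs, &local_err) < 0) {
 *         error_report_err(local_err);
 *         blk_unref(blk);
 *         return;
 *     }
 *     ... I/O through blk ...
 *     blk_unref(blk);
 *
 * The final blk_unref() triggers blk_delete() once the reference count
 * drops to zero.
 */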
 340
 341/*
 342 * Creates a new BlockBackend, opens a new BlockDriverState, and connects both.
 343 *
 344 * Just as with bdrv_open(), after having called this function the reference to
 345 * @options belongs to the block layer (even on failure).
 346 *
 347 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 348 * BDS tree just by specifying the @options QDict (or @reference,
 349 * alternatively). At the time of adding this function, this is not possible,
 350 * though, so callers of this function have to be able to specify @filename and
 351 * @flags.
 352 */
 353BlockBackend *blk_new_open(const char *filename, const char *reference,
 354                           QDict *options, int flags, Error **errp)
 355{
 356    BlockBackend *blk;
 357    BlockDriverState *bs;
 358    uint64_t perm = 0;
 359
 360    /* blk_new_open() is mainly used in .bdrv_create implementations and the
 361     * tools where sharing isn't a concern because the BDS stays private, so we
 362     * just request permission according to the flags.
 363     *
 364     * The exceptions are xen_disk and blockdev_init(); in these cases, the
 365     * caller of blk_new_open() doesn't make use of the permissions, but they
 366     * shouldn't hurt either. We can still share everything here because the
 367     * guest devices will add their own blockers if they can't share. */
 368    if ((flags & BDRV_O_NO_IO) == 0) {
 369        perm |= BLK_PERM_CONSISTENT_READ;
 370        if (flags & BDRV_O_RDWR) {
 371            perm |= BLK_PERM_WRITE;
 372        }
 373    }
 374    if (flags & BDRV_O_RESIZE) {
 375        perm |= BLK_PERM_RESIZE;
 376    }
 377
 378    blk = blk_new(perm, BLK_PERM_ALL);
 379    bs = bdrv_open(filename, reference, options, flags, errp);
 380    if (!bs) {
 381        blk_unref(blk);
 382        return NULL;
 383    }
 384
 385    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
 386                                       perm, BLK_PERM_ALL, blk, errp);
 387    if (!blk->root) {
 388        bdrv_unref(bs);
 389        blk_unref(blk);
 390        return NULL;
 391    }
 392
 393    return blk;
 394}
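
/*
 * Illustrative sketch (an assumed tool-style caller, not taken from QEMU):
 * opening an image read-write through blk_new_open(). The filename and the
 * explicit driver option are example values only.
 *
 *     QDict *options = qdict_new();
 *     Error *local_err = NULL;
 *     BlockBackend *blk;
 *
 *     qdict_put_str(options, "driver", "qcow2");
 *     blk = blk_new_open("test.qcow2", NULL, options, BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);
 *         return -1;
 *     }
 *     ... read/write via blk_pread()/blk_pwrite() ...
 *     blk_unref(blk);
 *
 * As documented above, @options is consumed even when the call fails, so
 * the caller must not unref it afterwards.
 */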
 395
 396static void blk_delete(BlockBackend *blk)
 397{
 398    assert(!blk->refcnt);
 399    assert(!blk->name);
 400    assert(!blk->dev);
 401    if (blk->public.throttle_group_member.throttle_state) {
 402        blk_io_limits_disable(blk);
 403    }
 404    if (blk->root) {
 405        blk_remove_bs(blk);
 406    }
 407    if (blk->vmsh) {
 408        qemu_del_vm_change_state_handler(blk->vmsh);
 409        blk->vmsh = NULL;
 410    }
 411    assert(QLIST_EMPTY(&blk->remove_bs_notifiers.notifiers));
 412    assert(QLIST_EMPTY(&blk->insert_bs_notifiers.notifiers));
 413    assert(QLIST_EMPTY(&blk->aio_notifiers));
 414    QTAILQ_REMOVE(&block_backends, blk, link);
 415    drive_info_del(blk->legacy_dinfo);
 416    block_acct_cleanup(&blk->stats);
 417    g_free(blk);
 418}
 419
 420static void drive_info_del(DriveInfo *dinfo)
 421{
 422    if (!dinfo) {
 423        return;
 424    }
 425    qemu_opts_del(dinfo->opts);
 426    g_free(dinfo);
 427}
 428
 429int blk_get_refcnt(BlockBackend *blk)
 430{
 431    return blk ? blk->refcnt : 0;
 432}
 433
 434/*
 435 * Increment @blk's reference count.
 436 * @blk must not be null.
 437 */
 438void blk_ref(BlockBackend *blk)
 439{
 440    assert(blk->refcnt > 0);
 441    blk->refcnt++;
 442}
 443
 444/*
 445 * Decrement @blk's reference count.
 446 * If this drops it to zero, destroy @blk.
 447 * For convenience, do nothing if @blk is null.
 448 */
 449void blk_unref(BlockBackend *blk)
 450{
 451    if (blk) {
 452        assert(blk->refcnt > 0);
 453        if (blk->refcnt > 1) {
 454            blk->refcnt--;
 455        } else {
 456            blk_drain(blk);
 457            /* blk_drain() cannot resurrect blk, nobody held a reference */
 458            assert(blk->refcnt == 1);
 459            blk->refcnt = 0;
 460            blk_delete(blk);
 461        }
 462    }
 463}
 464
 465/*
 466 * Behaves similarly to blk_next() but iterates over all BlockBackends, even the
 467 * ones which are hidden (i.e. are not referenced by the monitor).
 468 */
 469BlockBackend *blk_all_next(BlockBackend *blk)
 470{
 471    return blk ? QTAILQ_NEXT(blk, link)
 472               : QTAILQ_FIRST(&block_backends);
 473}
 474
 475void blk_remove_all_bs(void)
 476{
 477    BlockBackend *blk = NULL;
 478
 479    while ((blk = blk_all_next(blk)) != NULL) {
 480        AioContext *ctx = blk_get_aio_context(blk);
 481
 482        aio_context_acquire(ctx);
 483        if (blk->root) {
 484            blk_remove_bs(blk);
 485        }
 486        aio_context_release(ctx);
 487    }
 488}
 489
 490/*
 491 * Return the monitor-owned BlockBackend after @blk.
 492 * If @blk is null, return the first one.
 493 * Else, return @blk's next sibling, which may be null.
 494 *
 495 * To iterate over all BlockBackends, do
 496 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 497 *     ...
 498 * }
 499 */
 500BlockBackend *blk_next(BlockBackend *blk)
 501{
 502    return blk ? QTAILQ_NEXT(blk, monitor_link)
 503               : QTAILQ_FIRST(&monitor_block_backends);
 504}
 505
 506/* Iterates over all top-level BlockDriverStates, i.e. BDSs that are owned by
 507 * the monitor or attached to a BlockBackend */
 508BlockDriverState *bdrv_next(BdrvNextIterator *it)
 509{
 510    BlockDriverState *bs, *old_bs;
 511
 512    /* Must be called from the main loop */
 513    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
 514
 515    /* First, return all root nodes of BlockBackends. In order to avoid
 516     * returning a BDS twice when multiple BBs refer to it, we only return it
 517     * if the BB is the first one in the parent list of the BDS. */
 518    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
 519        BlockBackend *old_blk = it->blk;
 520
 521        old_bs = old_blk ? blk_bs(old_blk) : NULL;
 522
 523        do {
 524            it->blk = blk_all_next(it->blk);
 525            bs = it->blk ? blk_bs(it->blk) : NULL;
 526        } while (it->blk && (bs == NULL || bdrv_first_blk(bs) != it->blk));
 527
 528        if (it->blk) {
 529            blk_ref(it->blk);
 530        }
 531        blk_unref(old_blk);
 532
 533        if (bs) {
 534            bdrv_ref(bs);
 535            bdrv_unref(old_bs);
 536            return bs;
 537        }
 538        it->phase = BDRV_NEXT_MONITOR_OWNED;
 539    } else {
 540        old_bs = it->bs;
 541    }
 542
 543    /* Then return the monitor-owned BDSes without a BB attached. Ignore all
 544     * BDSes that are attached to a BlockBackend here; they have been handled
 545     * by the above block already */
 546    do {
 547        it->bs = bdrv_next_monitor_owned(it->bs);
 548        bs = it->bs;
 549    } while (bs && bdrv_has_blk(bs));
 550
 551    if (bs) {
 552        bdrv_ref(bs);
 553    }
 554    bdrv_unref(old_bs);
 555
 556    return bs;
 557}
 558
 559static void bdrv_next_reset(BdrvNextIterator *it)
 560{
 561    *it = (BdrvNextIterator) {
 562        .phase = BDRV_NEXT_BACKEND_ROOTS,
 563    };
 564}
 565
 566BlockDriverState *bdrv_first(BdrvNextIterator *it)
 567{
 568    bdrv_next_reset(it);
 569    return bdrv_next(it);
 570}
 571
 572/* Must be called when aborting a bdrv_next() iteration before
 573 * bdrv_next() returns NULL */
 574void bdrv_next_cleanup(BdrvNextIterator *it)
 575{
 576    /* Must be called from the main loop */
 577    assert(qemu_get_current_aio_context() == qemu_get_aio_context());
 578
 579    if (it->phase == BDRV_NEXT_BACKEND_ROOTS) {
 580        if (it->blk) {
 581            bdrv_unref(blk_bs(it->blk));
 582            blk_unref(it->blk);
 583        }
 584    } else {
 585        bdrv_unref(it->bs);
 586    }
 587
 588    bdrv_next_reset(it);
 589}
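
/*
 * Illustrative sketch: the usual pattern for walking all top-level nodes
 * with this iterator. bdrv_next_cleanup() drops the references the
 * iterator still holds and is only needed when the loop is abandoned
 * before bdrv_next() has returned NULL; want_to_stop_early() is a
 * placeholder predicate.
 *
 *     BdrvNextIterator it;
 *     BlockDriverState *bs;
 *
 *     for (bs = bdrv_first(&it); bs; bs = bdrv_next(&it)) {
 *         if (want_to_stop_early(bs)) {
 *             bdrv_next_cleanup(&it);
 *             break;
 *         }
 *     }
 */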
 590
 591/*
 592 * Add a BlockBackend into the list of backends referenced by the monitor, with
 593 * the given @name acting as the handle for the monitor.
 594 * Strictly for use by blockdev.c.
 595 *
 596 * @name must not be null or empty.
 597 *
 598 * Returns true on success and false on failure. In the latter case, an Error
 599 * object is returned through @errp.
 600 */
 601bool monitor_add_blk(BlockBackend *blk, const char *name, Error **errp)
 602{
 603    assert(!blk->name);
 604    assert(name && name[0]);
 605
 606    if (!id_wellformed(name)) {
 607        error_setg(errp, "Invalid device name");
 608        return false;
 609    }
 610    if (blk_by_name(name)) {
 611        error_setg(errp, "Device with id '%s' already exists", name);
 612        return false;
 613    }
 614    if (bdrv_find_node(name)) {
 615        error_setg(errp,
 616                   "Device name '%s' conflicts with an existing node name",
 617                   name);
 618        return false;
 619    }
 620
 621    blk->name = g_strdup(name);
 622    QTAILQ_INSERT_TAIL(&monitor_block_backends, blk, monitor_link);
 623    return true;
 624}
 625
 626/*
 627 * Remove a BlockBackend from the list of backends referenced by the monitor.
 628 * Strictly for use by blockdev.c.
 629 */
 630void monitor_remove_blk(BlockBackend *blk)
 631{
 632    if (!blk->name) {
 633        return;
 634    }
 635
 636    QTAILQ_REMOVE(&monitor_block_backends, blk, monitor_link);
 637    g_free(blk->name);
 638    blk->name = NULL;
 639}
 640
 641/*
 642 * Return @blk's name, a non-null string.
 643 * Returns an empty string iff @blk is not referenced by the monitor.
 644 */
 645const char *blk_name(const BlockBackend *blk)
 646{
 647    return blk->name ?: "";
 648}
 649
 650/*
 651 * Return the BlockBackend with name @name if it exists, else null.
 652 * @name must not be null.
 653 */
 654BlockBackend *blk_by_name(const char *name)
 655{
 656    BlockBackend *blk = NULL;
 657
 658    assert(name);
 659    while ((blk = blk_next(blk)) != NULL) {
 660        if (!strcmp(name, blk->name)) {
 661            return blk;
 662        }
 663    }
 664    return NULL;
 665}
 666
 667/*
 668 * Return the BlockDriverState attached to @blk if any, else null.
 669 */
 670BlockDriverState *blk_bs(BlockBackend *blk)
 671{
 672    return blk->root ? blk->root->bs : NULL;
 673}
 674
 675static BlockBackend *bdrv_first_blk(BlockDriverState *bs)
 676{
 677    BdrvChild *child;
 678    QLIST_FOREACH(child, &bs->parents, next_parent) {
 679        if (child->role == &child_root) {
 680            return child->opaque;
 681        }
 682    }
 683
 684    return NULL;
 685}
 686
 687/*
 688 * Returns true if @bs has an associated BlockBackend.
 689 */
 690bool bdrv_has_blk(BlockDriverState *bs)
 691{
 692    return bdrv_first_blk(bs) != NULL;
 693}
 694
 695/*
 696 * Returns true if @bs has only BlockBackends as parents.
 697 */
 698bool bdrv_is_root_node(BlockDriverState *bs)
 699{
 700    BdrvChild *c;
 701
 702    QLIST_FOREACH(c, &bs->parents, next_parent) {
 703        if (c->role != &child_root) {
 704            return false;
 705        }
 706    }
 707
 708    return true;
 709}
 710
 711/*
 712 * Return @blk's DriveInfo if any, else null.
 713 */
 714DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
 715{
 716    return blk->legacy_dinfo;
 717}
 718
 719/*
 720 * Set @blk's DriveInfo to @dinfo, and return it.
 721 * @blk must not have a DriveInfo set already.
 722 * No other BlockBackend may have the same DriveInfo set.
 723 */
 724DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
 725{
 726    assert(!blk->legacy_dinfo);
 727    return blk->legacy_dinfo = dinfo;
 728}
 729
 730/*
 731 * Return the BlockBackend with DriveInfo @dinfo.
 732 * It must exist.
 733 */
 734BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
 735{
 736    BlockBackend *blk = NULL;
 737
 738    while ((blk = blk_next(blk)) != NULL) {
 739        if (blk->legacy_dinfo == dinfo) {
 740            return blk;
 741        }
 742    }
 743    abort();
 744}
 745
 746/*
 747 * Returns a pointer to the publicly accessible fields of @blk.
 748 */
 749BlockBackendPublic *blk_get_public(BlockBackend *blk)
 750{
 751    return &blk->public;
 752}
 753
 754/*
 755 * Returns a BlockBackend given the associated @public fields.
 756 */
 757BlockBackend *blk_by_public(BlockBackendPublic *public)
 758{
 759    return container_of(public, BlockBackend, public);
 760}
 761
 762/*
 763 * Disassociates the currently associated BlockDriverState from @blk.
 764 */
 765void blk_remove_bs(BlockBackend *blk)
 766{
 767    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
 768    BlockDriverState *bs;
 769
 770    notifier_list_notify(&blk->remove_bs_notifiers, blk);
 771    if (tgm->throttle_state) {
 772        bs = blk_bs(blk);
 773        bdrv_drained_begin(bs);
 774        throttle_group_detach_aio_context(tgm);
 775        throttle_group_attach_aio_context(tgm, qemu_get_aio_context());
 776        bdrv_drained_end(bs);
 777    }
 778
 779    blk_update_root_state(blk);
 780
 781    /* bdrv_root_unref_child() will cause blk->root to become stale and may
 782     * switch to a completion coroutine later on. Let's drain all I/O here
 783     * to avoid that and a potential QEMU crash.
 784     */
 785    blk_drain(blk);
 786    bdrv_root_unref_child(blk->root);
 787    blk->root = NULL;
 788}
 789
 790/*
 791 * Associates a new BlockDriverState with @blk.
 792 */
 793int blk_insert_bs(BlockBackend *blk, BlockDriverState *bs, Error **errp)
 794{
 795    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
 796    blk->root = bdrv_root_attach_child(bs, "root", &child_root,
 797                                       blk->perm, blk->shared_perm, blk, errp);
 798    if (blk->root == NULL) {
 799        return -EPERM;
 800    }
 801    bdrv_ref(bs);
 802
 803    notifier_list_notify(&blk->insert_bs_notifiers, blk);
 804    if (tgm->throttle_state) {
 805        throttle_group_detach_aio_context(tgm);
 806        throttle_group_attach_aio_context(tgm, bdrv_get_aio_context(bs));
 807    }
 808
 809    return 0;
 810}
 811
 812/*
 813 * Sets the permission bitmasks that the user of the BlockBackend needs.
 814 */
 815int blk_set_perm(BlockBackend *blk, uint64_t perm, uint64_t shared_perm,
 816                 Error **errp)
 817{
 818    int ret;
 819
 820    if (blk->root && !blk->disable_perm) {
 821        ret = bdrv_child_try_set_perm(blk->root, perm, shared_perm, errp);
 822        if (ret < 0) {
 823            return ret;
 824        }
 825    }
 826
 827    blk->perm = perm;
 828    blk->shared_perm = shared_perm;
 829
 830    return 0;
 831}
 832
 833void blk_get_perm(BlockBackend *blk, uint64_t *perm, uint64_t *shared_perm)
 834{
 835    *perm = blk->perm;
 836    *shared_perm = blk->shared_perm;
 837}
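
/*
 * Illustrative sketch (assumed device-model code, not an upstream caller):
 * once a device no longer needs to tolerate other writers, for example
 * after the migration scenario described above blk_root_activate(), it
 * could stop sharing BLK_PERM_WRITE with other users of the node:
 *
 *     uint64_t perm, shared_perm;
 *     Error *local_err = NULL;
 *
 *     blk_get_perm(blk, &perm, &shared_perm);
 *     if (blk_set_perm(blk, perm, shared_perm & ~BLK_PERM_WRITE,
 *                      &local_err) < 0) {
 *         error_report_err(local_err);
 *     }
 */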
 838
 839static int blk_do_attach_dev(BlockBackend *blk, void *dev)
 840{
 841    if (blk->dev) {
 842        return -EBUSY;
 843    }
 844
 845    /* While migration is still incoming, we don't need to apply the
 846     * permissions of guest device BlockBackends. We might still have a block
 847     * job or NBD server writing to the image for storage migration. */
 848    if (runstate_check(RUN_STATE_INMIGRATE)) {
 849        blk->disable_perm = true;
 850    }
 851
 852    blk_ref(blk);
 853    blk->dev = dev;
 854    blk->legacy_dev = false;
 855    blk_iostatus_reset(blk);
 856
 857    return 0;
 858}
 859
 860/*
 861 * Attach device model @dev to @blk.
 862 * Return 0 on success, -EBUSY when a device model is attached already.
 863 */
 864int blk_attach_dev(BlockBackend *blk, DeviceState *dev)
 865{
 866    return blk_do_attach_dev(blk, dev);
 867}
 868
 869/*
 870 * Attach device model @dev to @blk.
 871 * @blk must not have a device model attached already.
 872 * TODO qdevified devices don't use this, remove when devices are qdevified
 873 */
 874void blk_attach_dev_legacy(BlockBackend *blk, void *dev)
 875{
 876    if (blk_do_attach_dev(blk, dev) < 0) {
 877        abort();
 878    }
 879    blk->legacy_dev = true;
 880}
 881
 882/*
 883 * Detach device model @dev from @blk.
 884 * @dev must be currently attached to @blk.
 885 */
 886void blk_detach_dev(BlockBackend *blk, void *dev)
 887/* TODO change to DeviceState *dev when all users are qdevified */
 888{
 889    assert(blk->dev == dev);
 890    blk->dev = NULL;
 891    blk->dev_ops = NULL;
 892    blk->dev_opaque = NULL;
 893    blk->guest_block_size = 512;
 894    blk_set_perm(blk, 0, BLK_PERM_ALL, &error_abort);
 895    blk_unref(blk);
 896}
 897
 898/*
 899 * Return the device model attached to @blk if any, else null.
 900 */
 901void *blk_get_attached_dev(BlockBackend *blk)
 902/* TODO change to return DeviceState * when all users are qdevified */
 903{
 904    return blk->dev;
 905}
 906
 907/* Return the qdev ID, or if no ID is assigned the QOM path, of the block
 908 * device attached to the BlockBackend. */
 909char *blk_get_attached_dev_id(BlockBackend *blk)
 910{
 911    DeviceState *dev;
 912
 913    assert(!blk->legacy_dev);
 914    dev = blk->dev;
 915
 916    if (!dev) {
 917        return g_strdup("");
 918    } else if (dev->id) {
 919        return g_strdup(dev->id);
 920    }
 921
 922    return object_get_canonical_path(OBJECT(dev)) ?: g_strdup("");
 923}
 924
 925/*
 926 * Return the BlockBackend which has the device model @dev attached if it
 927 * exists, else null.
 928 *
 929 * @dev must not be null.
 930 */
 931BlockBackend *blk_by_dev(void *dev)
 932{
 933    BlockBackend *blk = NULL;
 934
 935    assert(dev != NULL);
 936    while ((blk = blk_all_next(blk)) != NULL) {
 937        if (blk->dev == dev) {
 938            return blk;
 939        }
 940    }
 941    return NULL;
 942}
 943
 944/*
 945 * Set @blk's device model callbacks to @ops.
 946 * @opaque is the opaque argument to pass to the callbacks.
 947 * This is for use by device models.
 948 */
 949void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
 950                     void *opaque)
 951{
 952    /* All drivers that use blk_set_dev_ops() are qdevified and we want to keep
 953     * it that way, so we can assume blk->dev, if present, is a DeviceState if
  954 * blk->dev_ops is set. Non-device users may use dev_ops without a device. */
 955    assert(!blk->legacy_dev);
 956
 957    blk->dev_ops = ops;
 958    blk->dev_opaque = opaque;
 959
 960    /* Are we currently quiesced? Should we enforce this right now? */
 961    if (blk->quiesce_counter && ops->drained_begin) {
 962        ops->drained_begin(opaque);
 963    }
 964}
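
/*
 * Illustrative sketch (hypothetical device code; all my_dev_* names are
 * invented for this example): a qdevified device typically registers its
 * callbacks right after attaching itself to the BlockBackend.
 *
 *     static const BlockDevOps my_dev_block_ops = {
 *         .resize_cb     = my_dev_resize_cb,
 *         .drained_begin = my_dev_drained_begin,
 *         .drained_end   = my_dev_drained_end,
 *     };
 *
 *     blk_set_dev_ops(s->blk, &my_dev_block_ops, s);
 *
 * If the backend is already quiesced at this point, drained_begin is
 * invoked immediately, as implemented above.
 */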
 965
 966/*
 967 * Notify @blk's attached device model of media change.
 968 *
 969 * If @load is true, notify of media load. This action can fail, meaning that
 970 * the medium cannot be loaded. @errp is set then.
 971 *
 972 * If @load is false, notify of media eject. This can never fail.
 973 *
 974 * Also send DEVICE_TRAY_MOVED events as appropriate.
 975 */
 976void blk_dev_change_media_cb(BlockBackend *blk, bool load, Error **errp)
 977{
 978    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
 979        bool tray_was_open, tray_is_open;
 980        Error *local_err = NULL;
 981
 982        assert(!blk->legacy_dev);
 983
 984        tray_was_open = blk_dev_is_tray_open(blk);
 985        blk->dev_ops->change_media_cb(blk->dev_opaque, load, &local_err);
 986        if (local_err) {
 987            assert(load == true);
 988            error_propagate(errp, local_err);
 989            return;
 990        }
 991        tray_is_open = blk_dev_is_tray_open(blk);
 992
 993        if (tray_was_open != tray_is_open) {
 994            char *id = blk_get_attached_dev_id(blk);
 995            qapi_event_send_device_tray_moved(blk_name(blk), id, tray_is_open);
 996            g_free(id);
 997        }
 998    }
 999}
1000
1001static void blk_root_change_media(BdrvChild *child, bool load)
1002{
1003    blk_dev_change_media_cb(child->opaque, load, NULL);
1004}
1005
1006/*
1007 * Does @blk's attached device model have removable media?
1008 * %true if no device model is attached.
1009 */
1010bool blk_dev_has_removable_media(BlockBackend *blk)
1011{
1012    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
1013}
1014
1015/*
1016 * Does @blk's attached device model have a tray?
1017 */
1018bool blk_dev_has_tray(BlockBackend *blk)
1019{
1020    return blk->dev_ops && blk->dev_ops->is_tray_open;
1021}
1022
1023/*
1024 * Notify @blk's attached device model of a media eject request.
1025 * If @force is true, the medium is about to be yanked out forcefully.
1026 */
1027void blk_dev_eject_request(BlockBackend *blk, bool force)
1028{
1029    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
1030        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
1031    }
1032}
1033
1034/*
1035 * Does @blk's attached device model have a tray, and is it open?
1036 */
1037bool blk_dev_is_tray_open(BlockBackend *blk)
1038{
1039    if (blk_dev_has_tray(blk)) {
1040        return blk->dev_ops->is_tray_open(blk->dev_opaque);
1041    }
1042    return false;
1043}
1044
1045/*
1046 * Does @blk's attached device model have the medium locked?
1047 * %false if the device model has no such lock.
1048 */
1049bool blk_dev_is_medium_locked(BlockBackend *blk)
1050{
1051    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
1052        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
1053    }
1054    return false;
1055}
1056
1057/*
1058 * Notify @blk's attached device model of a backend size change.
1059 */
1060static void blk_root_resize(BdrvChild *child)
1061{
1062    BlockBackend *blk = child->opaque;
1063
1064    if (blk->dev_ops && blk->dev_ops->resize_cb) {
1065        blk->dev_ops->resize_cb(blk->dev_opaque);
1066    }
1067}
1068
1069void blk_iostatus_enable(BlockBackend *blk)
1070{
1071    blk->iostatus_enabled = true;
1072    blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
1073}
1074
1075/* The I/O status is only enabled if the drive explicitly
1076 * enables it _and_ the VM is configured to stop on errors */
1077bool blk_iostatus_is_enabled(const BlockBackend *blk)
1078{
1079    return (blk->iostatus_enabled &&
1080           (blk->on_write_error == BLOCKDEV_ON_ERROR_ENOSPC ||
1081            blk->on_write_error == BLOCKDEV_ON_ERROR_STOP   ||
1082            blk->on_read_error == BLOCKDEV_ON_ERROR_STOP));
1083}
1084
1085BlockDeviceIoStatus blk_iostatus(const BlockBackend *blk)
1086{
1087    return blk->iostatus;
1088}
1089
1090void blk_iostatus_disable(BlockBackend *blk)
1091{
1092    blk->iostatus_enabled = false;
1093}
1094
1095void blk_iostatus_reset(BlockBackend *blk)
1096{
1097    if (blk_iostatus_is_enabled(blk)) {
1098        BlockDriverState *bs = blk_bs(blk);
1099        blk->iostatus = BLOCK_DEVICE_IO_STATUS_OK;
1100        if (bs && bs->job) {
1101            block_job_iostatus_reset(bs->job);
1102        }
1103    }
1104}
1105
1106void blk_iostatus_set_err(BlockBackend *blk, int error)
1107{
1108    assert(blk_iostatus_is_enabled(blk));
1109    if (blk->iostatus == BLOCK_DEVICE_IO_STATUS_OK) {
1110        blk->iostatus = error == ENOSPC ? BLOCK_DEVICE_IO_STATUS_NOSPACE :
1111                                          BLOCK_DEVICE_IO_STATUS_FAILED;
1112    }
1113}
1114
1115void blk_set_allow_write_beyond_eof(BlockBackend *blk, bool allow)
1116{
1117    blk->allow_write_beyond_eof = allow;
1118}
1119
1120static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
1121                                  size_t size)
1122{
1123    int64_t len;
1124
1125    if (size > INT_MAX) {
1126        return -EIO;
1127    }
1128
1129    if (!blk_is_available(blk)) {
1130        return -ENOMEDIUM;
1131    }
1132
1133    if (offset < 0) {
1134        return -EIO;
1135    }
1136
1137    if (!blk->allow_write_beyond_eof) {
1138        len = blk_getlength(blk);
1139        if (len < 0) {
1140            return len;
1141        }
1142
1143        if (offset > len || len - offset < size) {
1144            return -EIO;
1145        }
1146    }
1147
1148    return 0;
1149}
1150
1151int coroutine_fn blk_co_preadv(BlockBackend *blk, int64_t offset,
1152                               unsigned int bytes, QEMUIOVector *qiov,
1153                               BdrvRequestFlags flags)
1154{
1155    int ret;
1156    BlockDriverState *bs = blk_bs(blk);
1157
1158    trace_blk_co_preadv(blk, bs, offset, bytes, flags);
1159
1160    ret = blk_check_byte_request(blk, offset, bytes);
1161    if (ret < 0) {
1162        return ret;
1163    }
1164
1165    bdrv_inc_in_flight(bs);
1166
1167    /* throttling disk I/O */
1168    if (blk->public.throttle_group_member.throttle_state) {
1169        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
1170                bytes, false);
1171    }
1172
1173    ret = bdrv_co_preadv(blk->root, offset, bytes, qiov, flags);
1174    bdrv_dec_in_flight(bs);
1175    return ret;
1176}
1177
1178int coroutine_fn blk_co_pwritev(BlockBackend *blk, int64_t offset,
1179                                unsigned int bytes, QEMUIOVector *qiov,
1180                                BdrvRequestFlags flags)
1181{
1182    int ret;
1183    BlockDriverState *bs = blk_bs(blk);
1184
1185    trace_blk_co_pwritev(blk, bs, offset, bytes, flags);
1186
1187    ret = blk_check_byte_request(blk, offset, bytes);
1188    if (ret < 0) {
1189        return ret;
1190    }
1191
1192    bdrv_inc_in_flight(bs);
1193    /* throttling disk I/O */
1194    if (blk->public.throttle_group_member.throttle_state) {
1195        throttle_group_co_io_limits_intercept(&blk->public.throttle_group_member,
1196                bytes, true);
1197    }
1198
1199    if (!blk->enable_write_cache) {
1200        flags |= BDRV_REQ_FUA;
1201    }
1202
1203    ret = bdrv_co_pwritev(blk->root, offset, bytes, qiov, flags);
1204    bdrv_dec_in_flight(bs);
1205    return ret;
1206}
1207
1208typedef struct BlkRwCo {
1209    BlockBackend *blk;
1210    int64_t offset;
1211    void *iobuf;
1212    int ret;
1213    BdrvRequestFlags flags;
1214} BlkRwCo;
1215
1216static void blk_read_entry(void *opaque)
1217{
1218    BlkRwCo *rwco = opaque;
1219    QEMUIOVector *qiov = rwco->iobuf;
1220
1221    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, qiov->size,
1222                              qiov, rwco->flags);
1223    aio_wait_kick();
1224}
1225
1226static void blk_write_entry(void *opaque)
1227{
1228    BlkRwCo *rwco = opaque;
1229    QEMUIOVector *qiov = rwco->iobuf;
1230
1231    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, qiov->size,
1232                               qiov, rwco->flags);
1233    aio_wait_kick();
1234}
1235
1236static int blk_prw(BlockBackend *blk, int64_t offset, uint8_t *buf,
1237                   int64_t bytes, CoroutineEntry co_entry,
1238                   BdrvRequestFlags flags)
1239{
1240    QEMUIOVector qiov;
1241    struct iovec iov;
1242    BlkRwCo rwco;
1243
1244    iov = (struct iovec) {
1245        .iov_base = buf,
1246        .iov_len = bytes,
1247    };
1248    qemu_iovec_init_external(&qiov, &iov, 1);
1249
1250    rwco = (BlkRwCo) {
1251        .blk    = blk,
1252        .offset = offset,
1253        .iobuf  = &qiov,
1254        .flags  = flags,
1255        .ret    = NOT_DONE,
1256    };
1257
1258    if (qemu_in_coroutine()) {
1259        /* Fast-path if already in coroutine context */
1260        co_entry(&rwco);
1261    } else {
1262        Coroutine *co = qemu_coroutine_create(co_entry, &rwco);
1263        bdrv_coroutine_enter(blk_bs(blk), co);
1264        BDRV_POLL_WHILE(blk_bs(blk), rwco.ret == NOT_DONE);
1265    }
1266
1267    return rwco.ret;
1268}
1269
1270int blk_pread_unthrottled(BlockBackend *blk, int64_t offset, uint8_t *buf,
1271                          int count)
1272{
1273    int ret;
1274
1275    ret = blk_check_byte_request(blk, offset, count);
1276    if (ret < 0) {
1277        return ret;
1278    }
1279
1280    blk_root_drained_begin(blk->root);
1281    ret = blk_pread(blk, offset, buf, count);
1282    blk_root_drained_end(blk->root);
1283    return ret;
1284}
1285
1286int blk_pwrite_zeroes(BlockBackend *blk, int64_t offset,
1287                      int bytes, BdrvRequestFlags flags)
1288{
1289    return blk_prw(blk, offset, NULL, bytes, blk_write_entry,
1290                   flags | BDRV_REQ_ZERO_WRITE);
1291}
1292
1293int blk_make_zero(BlockBackend *blk, BdrvRequestFlags flags)
1294{
1295    return bdrv_make_zero(blk->root, flags);
1296}
1297
1298static void blk_inc_in_flight(BlockBackend *blk)
1299{
1300    atomic_inc(&blk->in_flight);
1301}
1302
1303static void blk_dec_in_flight(BlockBackend *blk)
1304{
1305    atomic_dec(&blk->in_flight);
1306    aio_wait_kick();
1307}
1308
1309static void error_callback_bh(void *opaque)
1310{
1311    struct BlockBackendAIOCB *acb = opaque;
1312
1313    blk_dec_in_flight(acb->blk);
1314    acb->common.cb(acb->common.opaque, acb->ret);
1315    qemu_aio_unref(acb);
1316}
1317
1318BlockAIOCB *blk_abort_aio_request(BlockBackend *blk,
1319                                  BlockCompletionFunc *cb,
1320                                  void *opaque, int ret)
1321{
1322    struct BlockBackendAIOCB *acb;
1323
1324    blk_inc_in_flight(blk);
1325    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
1326    acb->blk = blk;
1327    acb->ret = ret;
1328
1329    aio_bh_schedule_oneshot(blk_get_aio_context(blk), error_callback_bh, acb);
1330    return &acb->common;
1331}
1332
1333typedef struct BlkAioEmAIOCB {
1334    BlockAIOCB common;
1335    BlkRwCo rwco;
1336    int bytes;
1337    bool has_returned;
1338} BlkAioEmAIOCB;
1339
1340static const AIOCBInfo blk_aio_em_aiocb_info = {
1341    .aiocb_size         = sizeof(BlkAioEmAIOCB),
1342};
1343
1344static void blk_aio_complete(BlkAioEmAIOCB *acb)
1345{
1346    if (acb->has_returned) {
1347        acb->common.cb(acb->common.opaque, acb->rwco.ret);
1348        blk_dec_in_flight(acb->rwco.blk);
1349        qemu_aio_unref(acb);
1350    }
1351}
1352
1353static void blk_aio_complete_bh(void *opaque)
1354{
1355    BlkAioEmAIOCB *acb = opaque;
1356    assert(acb->has_returned);
1357    blk_aio_complete(acb);
1358}
1359
1360static BlockAIOCB *blk_aio_prwv(BlockBackend *blk, int64_t offset, int bytes,
1361                                void *iobuf, CoroutineEntry co_entry,
1362                                BdrvRequestFlags flags,
1363                                BlockCompletionFunc *cb, void *opaque)
1364{
1365    BlkAioEmAIOCB *acb;
1366    Coroutine *co;
1367
1368    blk_inc_in_flight(blk);
1369    acb = blk_aio_get(&blk_aio_em_aiocb_info, blk, cb, opaque);
1370    acb->rwco = (BlkRwCo) {
1371        .blk    = blk,
1372        .offset = offset,
1373        .iobuf  = iobuf,
1374        .flags  = flags,
1375        .ret    = NOT_DONE,
1376    };
1377    acb->bytes = bytes;
1378    acb->has_returned = false;
1379
1380    co = qemu_coroutine_create(co_entry, acb);
1381    bdrv_coroutine_enter(blk_bs(blk), co);
1382
1383    acb->has_returned = true;
1384    if (acb->rwco.ret != NOT_DONE) {
1385        aio_bh_schedule_oneshot(blk_get_aio_context(blk),
1386                                blk_aio_complete_bh, acb);
1387    }
1388
1389    return &acb->common;
1390}
1391
1392static void blk_aio_read_entry(void *opaque)
1393{
1394    BlkAioEmAIOCB *acb = opaque;
1395    BlkRwCo *rwco = &acb->rwco;
1396    QEMUIOVector *qiov = rwco->iobuf;
1397
1398    assert(qiov->size == acb->bytes);
1399    rwco->ret = blk_co_preadv(rwco->blk, rwco->offset, acb->bytes,
1400                              qiov, rwco->flags);
1401    blk_aio_complete(acb);
1402}
1403
1404static void blk_aio_write_entry(void *opaque)
1405{
1406    BlkAioEmAIOCB *acb = opaque;
1407    BlkRwCo *rwco = &acb->rwco;
1408    QEMUIOVector *qiov = rwco->iobuf;
1409
1410    assert(!qiov || qiov->size == acb->bytes);
1411    rwco->ret = blk_co_pwritev(rwco->blk, rwco->offset, acb->bytes,
1412                               qiov, rwco->flags);
1413    blk_aio_complete(acb);
1414}
1415
1416BlockAIOCB *blk_aio_pwrite_zeroes(BlockBackend *blk, int64_t offset,
1417                                  int count, BdrvRequestFlags flags,
1418                                  BlockCompletionFunc *cb, void *opaque)
1419{
1420    return blk_aio_prwv(blk, offset, count, NULL, blk_aio_write_entry,
1421                        flags | BDRV_REQ_ZERO_WRITE, cb, opaque);
1422}
1423
1424int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
1425{
1426    int ret = blk_prw(blk, offset, buf, count, blk_read_entry, 0);
1427    if (ret < 0) {
1428        return ret;
1429    }
1430    return count;
1431}
1432
1433int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count,
1434               BdrvRequestFlags flags)
1435{
1436    int ret = blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
1437                      flags);
1438    if (ret < 0) {
1439        return ret;
1440    }
1441    return count;
1442}
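
/*
 * Illustrative sketch: synchronous byte-based I/O through a backend. The
 * offsets and buffer size are arbitrary example values. On success both
 * helpers return the number of bytes transferred; on failure they return
 * a negative errno such as -ENOMEDIUM or -EIO.
 *
 *     uint8_t buf[512];
 *     int ret;
 *
 *     ret = blk_pread(blk, 0, buf, sizeof(buf));
 *     if (ret < 0) {
 *         return ret;
 *     }
 *     ret = blk_pwrite(blk, 4096, buf, sizeof(buf), 0);
 *     if (ret < 0) {
 *         return ret;
 *     }
 */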
1443
1444int64_t blk_getlength(BlockBackend *blk)
1445{
1446    if (!blk_is_available(blk)) {
1447        return -ENOMEDIUM;
1448    }
1449
1450    return bdrv_getlength(blk_bs(blk));
1451}
1452
1453void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
1454{
1455    if (!blk_bs(blk)) {
1456        *nb_sectors_ptr = 0;
1457    } else {
1458        bdrv_get_geometry(blk_bs(blk), nb_sectors_ptr);
1459    }
1460}
1461
1462int64_t blk_nb_sectors(BlockBackend *blk)
1463{
1464    if (!blk_is_available(blk)) {
1465        return -ENOMEDIUM;
1466    }
1467
1468    return bdrv_nb_sectors(blk_bs(blk));
1469}
1470
1471BlockAIOCB *blk_aio_preadv(BlockBackend *blk, int64_t offset,
1472                           QEMUIOVector *qiov, BdrvRequestFlags flags,
1473                           BlockCompletionFunc *cb, void *opaque)
1474{
1475    return blk_aio_prwv(blk, offset, qiov->size, qiov,
1476                        blk_aio_read_entry, flags, cb, opaque);
1477}
1478
1479BlockAIOCB *blk_aio_pwritev(BlockBackend *blk, int64_t offset,
1480                            QEMUIOVector *qiov, BdrvRequestFlags flags,
1481                            BlockCompletionFunc *cb, void *opaque)
1482{
1483    return blk_aio_prwv(blk, offset, qiov->size, qiov,
1484                        blk_aio_write_entry, flags, cb, opaque);
1485}
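
/*
 * Illustrative sketch (the callback, the request structure and its fields
 * are assumptions): submitting an asynchronous vectored read. The
 * completion callback runs in the BlockBackend's AioContext and receives
 * the request's return value, 0 on success or a negative errno.
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         MyRequest *req = opaque;
 *         my_request_complete(req, ret);
 *     }
 *
 *     qemu_iovec_init(&req->qiov, 1);
 *     qemu_iovec_add(&req->qiov, req->buf, req->len);
 *     blk_aio_preadv(blk, req->offset, &req->qiov, 0, my_read_done, req);
 */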
1486
1487static void blk_aio_flush_entry(void *opaque)
1488{
1489    BlkAioEmAIOCB *acb = opaque;
1490    BlkRwCo *rwco = &acb->rwco;
1491
1492    rwco->ret = blk_co_flush(rwco->blk);
1493    blk_aio_complete(acb);
1494}
1495
1496BlockAIOCB *blk_aio_flush(BlockBackend *blk,
1497                          BlockCompletionFunc *cb, void *opaque)
1498{
1499    return blk_aio_prwv(blk, 0, 0, NULL, blk_aio_flush_entry, 0, cb, opaque);
1500}
1501
1502static void blk_aio_pdiscard_entry(void *opaque)
1503{
1504    BlkAioEmAIOCB *acb = opaque;
1505    BlkRwCo *rwco = &acb->rwco;
1506
1507    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, acb->bytes);
1508    blk_aio_complete(acb);
1509}
1510
1511BlockAIOCB *blk_aio_pdiscard(BlockBackend *blk,
1512                             int64_t offset, int bytes,
1513                             BlockCompletionFunc *cb, void *opaque)
1514{
1515    return blk_aio_prwv(blk, offset, bytes, NULL, blk_aio_pdiscard_entry, 0,
1516                        cb, opaque);
1517}
1518
1519void blk_aio_cancel(BlockAIOCB *acb)
1520{
1521    bdrv_aio_cancel(acb);
1522}
1523
1524void blk_aio_cancel_async(BlockAIOCB *acb)
1525{
1526    bdrv_aio_cancel_async(acb);
1527}
1528
1529int blk_co_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
1530{
1531    if (!blk_is_available(blk)) {
1532        return -ENOMEDIUM;
1533    }
1534
1535    return bdrv_co_ioctl(blk_bs(blk), req, buf);
1536}
1537
1538static void blk_ioctl_entry(void *opaque)
1539{
1540    BlkRwCo *rwco = opaque;
1541    QEMUIOVector *qiov = rwco->iobuf;
1542
1543    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset,
1544                             qiov->iov[0].iov_base);
1545    aio_wait_kick();
1546}
1547
1548int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
1549{
1550    return blk_prw(blk, req, buf, 0, blk_ioctl_entry, 0);
1551}
1552
1553static void blk_aio_ioctl_entry(void *opaque)
1554{
1555    BlkAioEmAIOCB *acb = opaque;
1556    BlkRwCo *rwco = &acb->rwco;
1557
1558    rwco->ret = blk_co_ioctl(rwco->blk, rwco->offset, rwco->iobuf);
1559
1560    blk_aio_complete(acb);
1561}
1562
1563BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
1564                          BlockCompletionFunc *cb, void *opaque)
1565{
1566    return blk_aio_prwv(blk, req, 0, buf, blk_aio_ioctl_entry, 0, cb, opaque);
1567}
1568
1569int blk_co_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
1570{
1571    int ret = blk_check_byte_request(blk, offset, bytes);
1572    if (ret < 0) {
1573        return ret;
1574    }
1575
1576    return bdrv_co_pdiscard(blk->root, offset, bytes);
1577}
1578
1579int blk_co_flush(BlockBackend *blk)
1580{
1581    if (!blk_is_available(blk)) {
1582        return -ENOMEDIUM;
1583    }
1584
1585    return bdrv_co_flush(blk_bs(blk));
1586}
1587
1588static void blk_flush_entry(void *opaque)
1589{
1590    BlkRwCo *rwco = opaque;
1591    rwco->ret = blk_co_flush(rwco->blk);
1592    aio_wait_kick();
1593}
1594
1595int blk_flush(BlockBackend *blk)
1596{
1597    return blk_prw(blk, 0, NULL, 0, blk_flush_entry, 0);
1598}
1599
1600void blk_drain(BlockBackend *blk)
1601{
1602    BlockDriverState *bs = blk_bs(blk);
1603
1604    if (bs) {
1605        bdrv_drained_begin(bs);
1606    }
1607
1608    /* We may have -ENOMEDIUM completions in flight */
1609    AIO_WAIT_WHILE(blk_get_aio_context(blk),
1610                   atomic_mb_read(&blk->in_flight) > 0);
1611
1612    if (bs) {
1613        bdrv_drained_end(bs);
1614    }
1615}
1616
1617void blk_drain_all(void)
1618{
1619    BlockBackend *blk = NULL;
1620
1621    bdrv_drain_all_begin();
1622
1623    while ((blk = blk_all_next(blk)) != NULL) {
1624        AioContext *ctx = blk_get_aio_context(blk);
1625
1626        aio_context_acquire(ctx);
1627
1628        /* We may have -ENOMEDIUM completions in flight */
1629        AIO_WAIT_WHILE(ctx, atomic_mb_read(&blk->in_flight) > 0);
1630
1631        aio_context_release(ctx);
1632    }
1633
1634    bdrv_drain_all_end();
1635}
1636
1637void blk_set_on_error(BlockBackend *blk, BlockdevOnError on_read_error,
1638                      BlockdevOnError on_write_error)
1639{
1640    blk->on_read_error = on_read_error;
1641    blk->on_write_error = on_write_error;
1642}
1643
1644BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
1645{
1646    return is_read ? blk->on_read_error : blk->on_write_error;
1647}
1648
1649BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
1650                                      int error)
1651{
1652    BlockdevOnError on_err = blk_get_on_error(blk, is_read);
1653
1654    switch (on_err) {
1655    case BLOCKDEV_ON_ERROR_ENOSPC:
1656        return (error == ENOSPC) ?
1657               BLOCK_ERROR_ACTION_STOP : BLOCK_ERROR_ACTION_REPORT;
1658    case BLOCKDEV_ON_ERROR_STOP:
1659        return BLOCK_ERROR_ACTION_STOP;
1660    case BLOCKDEV_ON_ERROR_REPORT:
1661        return BLOCK_ERROR_ACTION_REPORT;
1662    case BLOCKDEV_ON_ERROR_IGNORE:
1663        return BLOCK_ERROR_ACTION_IGNORE;
1664    case BLOCKDEV_ON_ERROR_AUTO:
1665    default:
1666        abort();
1667    }
1668}
1669
1670static void send_qmp_error_event(BlockBackend *blk,
1671                                 BlockErrorAction action,
1672                                 bool is_read, int error)
1673{
1674    IoOperationType optype;
1675    BlockDriverState *bs = blk_bs(blk);
1676
1677    optype = is_read ? IO_OPERATION_TYPE_READ : IO_OPERATION_TYPE_WRITE;
1678    qapi_event_send_block_io_error(blk_name(blk), !!bs,
1679                                   bs ? bdrv_get_node_name(bs) : NULL, optype,
1680                                   action, blk_iostatus_is_enabled(blk),
1681                                   error == ENOSPC, strerror(error));
1682}
1683
1684/* This is done by device models because, while the block layer knows
1685 * about the error, it does not know whether an operation comes from
1686 * the device or the block layer (from a job, for example).
1687 */
1688void blk_error_action(BlockBackend *blk, BlockErrorAction action,
1689                      bool is_read, int error)
1690{
1691    assert(error >= 0);
1692
1693    if (action == BLOCK_ERROR_ACTION_STOP) {
1694        /* First set the iostatus, so that "info block" returns an iostatus
1695         * that matches the events raised so far (an additional error iostatus
1696         * is fine, but not a lost one).
1697         */
1698        blk_iostatus_set_err(blk, error);
1699
1700        /* Then raise the request to stop the VM and the event.
1701         * qemu_system_vmstop_request_prepare has two effects.  First,
1702         * it ensures that the STOP event always comes after the
1703         * BLOCK_IO_ERROR event.  Second, it ensures that even if management
1704         * can observe the STOP event and do a "cont" before the STOP
1705         * event is issued, the VM will not stop.  In this case, vm_start()
1706         * also ensures that the STOP/RESUME pair of events is emitted.
1707         */
1708        qemu_system_vmstop_request_prepare();
1709        send_qmp_error_event(blk, action, is_read, error);
1710        qemu_system_vmstop_request(RUN_STATE_IO_ERROR);
1711    } else {
1712        send_qmp_error_event(blk, action, is_read, error);
1713    }
1714}
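
/*
 * Illustrative sketch of the usual sequence in a device model's request
 * completion path (the my_dev_* helpers are assumptions for the example).
 * With BLOCK_ERROR_ACTION_STOP the device typically queues the request so
 * it can be retried once the VM resumes.
 *
 *     static void my_dev_handle_rw_error(MyDev *s, bool is_read, int error)
 *     {
 *         BlockErrorAction action = blk_get_error_action(s->blk, is_read,
 *                                                        error);
 *
 *         if (action == BLOCK_ERROR_ACTION_STOP) {
 *             my_dev_defer_request(s);
 *         } else if (action == BLOCK_ERROR_ACTION_REPORT) {
 *             my_dev_report_error_to_guest(s);
 *         }
 *         blk_error_action(s->blk, action, is_read, error);
 *     }
 */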
1715
1716bool blk_is_read_only(BlockBackend *blk)
1717{
1718    BlockDriverState *bs = blk_bs(blk);
1719
1720    if (bs) {
1721        return bdrv_is_read_only(bs);
1722    } else {
1723        return blk->root_state.read_only;
1724    }
1725}
1726
1727bool blk_is_sg(BlockBackend *blk)
1728{
1729    BlockDriverState *bs = blk_bs(blk);
1730
1731    if (!bs) {
1732        return false;
1733    }
1734
1735    return bdrv_is_sg(bs);
1736}
1737
1738bool blk_enable_write_cache(BlockBackend *blk)
1739{
1740    return blk->enable_write_cache;
1741}
1742
1743void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
1744{
1745    blk->enable_write_cache = wce;
1746}
1747
1748void blk_invalidate_cache(BlockBackend *blk, Error **errp)
1749{
1750    BlockDriverState *bs = blk_bs(blk);
1751
1752    if (!bs) {
1753        error_setg(errp, "Device '%s' has no medium", blk->name);
1754        return;
1755    }
1756
1757    bdrv_invalidate_cache(bs, errp);
1758}
1759
1760bool blk_is_inserted(BlockBackend *blk)
1761{
1762    BlockDriverState *bs = blk_bs(blk);
1763
1764    return bs && bdrv_is_inserted(bs);
1765}
1766
1767bool blk_is_available(BlockBackend *blk)
1768{
1769    return blk_is_inserted(blk) && !blk_dev_is_tray_open(blk);
1770}
1771
1772void blk_lock_medium(BlockBackend *blk, bool locked)
1773{
1774    BlockDriverState *bs = blk_bs(blk);
1775
1776    if (bs) {
1777        bdrv_lock_medium(bs, locked);
1778    }
1779}
1780
1781void blk_eject(BlockBackend *blk, bool eject_flag)
1782{
1783    BlockDriverState *bs = blk_bs(blk);
1784    char *id;
1785
1786    /* blk_eject is only called by qdevified devices */
1787    assert(!blk->legacy_dev);
1788
1789    if (bs) {
1790        bdrv_eject(bs, eject_flag);
1791    }
1792
1793    /* Whether or not we ejected on the backend,
1794     * the frontend experienced a tray event. */
1795    id = blk_get_attached_dev_id(blk);
1796    qapi_event_send_device_tray_moved(blk_name(blk), id,
1797                                      eject_flag);
1798    g_free(id);
1799}
1800
1801int blk_get_flags(BlockBackend *blk)
1802{
1803    BlockDriverState *bs = blk_bs(blk);
1804
1805    if (bs) {
1806        return bdrv_get_flags(bs);
1807    } else {
1808        return blk->root_state.open_flags;
1809    }
1810}
1811
1812/* Returns the maximum transfer length, in bytes; guaranteed nonzero */
1813uint32_t blk_get_max_transfer(BlockBackend *blk)
1814{
1815    BlockDriverState *bs = blk_bs(blk);
1816    uint32_t max = 0;
1817
1818    if (bs) {
1819        max = bs->bl.max_transfer;
1820    }
1821    return MIN_NON_ZERO(max, INT_MAX);
1822}
1823
1824int blk_get_max_iov(BlockBackend *blk)
1825{
1826    return blk->root->bs->bl.max_iov;
1827}
1828
1829void blk_set_guest_block_size(BlockBackend *blk, int align)
1830{
1831    blk->guest_block_size = align;
1832}
1833
1834void *blk_try_blockalign(BlockBackend *blk, size_t size)
1835{
1836    return qemu_try_blockalign(blk ? blk_bs(blk) : NULL, size);
1837}
1838
1839void *blk_blockalign(BlockBackend *blk, size_t size)
1840{
1841    return qemu_blockalign(blk ? blk_bs(blk) : NULL, size);
1842}
1843
1844bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
1845{
1846    BlockDriverState *bs = blk_bs(blk);
1847
1848    if (!bs) {
1849        return false;
1850    }
1851
1852    return bdrv_op_is_blocked(bs, op, errp);
1853}
1854
1855void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
1856{
1857    BlockDriverState *bs = blk_bs(blk);
1858
1859    if (bs) {
1860        bdrv_op_unblock(bs, op, reason);
1861    }
1862}
1863
1864void blk_op_block_all(BlockBackend *blk, Error *reason)
1865{
1866    BlockDriverState *bs = blk_bs(blk);
1867
1868    if (bs) {
1869        bdrv_op_block_all(bs, reason);
1870    }
1871}
1872
1873void blk_op_unblock_all(BlockBackend *blk, Error *reason)
1874{
1875    BlockDriverState *bs = blk_bs(blk);
1876
1877    if (bs) {
1878        bdrv_op_unblock_all(bs, reason);
1879    }
1880}
1881
1882AioContext *blk_get_aio_context(BlockBackend *blk)
1883{
1884    return bdrv_get_aio_context(blk_bs(blk));
1885}
1886
1887static AioContext *blk_aiocb_get_aio_context(BlockAIOCB *acb)
1888{
1889    BlockBackendAIOCB *blk_acb = DO_UPCAST(BlockBackendAIOCB, common, acb);
1890    return blk_get_aio_context(blk_acb->blk);
1891}
1892
1893void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
1894{
1895    BlockDriverState *bs = blk_bs(blk);
1896    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
1897
1898    if (bs) {
1899        if (tgm->throttle_state) {
1900            bdrv_drained_begin(bs);
1901            throttle_group_detach_aio_context(tgm);
1902            throttle_group_attach_aio_context(tgm, new_context);
1903            bdrv_drained_end(bs);
1904        }
1905        bdrv_set_aio_context(bs, new_context);
1906    }
1907}
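/*
 * Editor's sketch (assumptions flagged): a device model that wants its
 * backend serviced by an IOThread might move it along these lines.
 * "iothread" is a hypothetical IOThread *, and the caller is assumed to
 * satisfy the usual AioContext locking rules of the block layer.
 *
 *     AioContext *ctx = iothread_get_aio_context(iothread);
 *     blk_set_aio_context(blk, ctx);
 */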
1908
1909void blk_add_aio_context_notifier(BlockBackend *blk,
1910        void (*attached_aio_context)(AioContext *new_context, void *opaque),
1911        void (*detach_aio_context)(void *opaque), void *opaque)
1912{
1913    BlockBackendAioNotifier *notifier;
1914    BlockDriverState *bs = blk_bs(blk);
1915
1916    notifier = g_new(BlockBackendAioNotifier, 1);
1917    notifier->attached_aio_context = attached_aio_context;
1918    notifier->detach_aio_context = detach_aio_context;
1919    notifier->opaque = opaque;
1920    QLIST_INSERT_HEAD(&blk->aio_notifiers, notifier, list);
1921
1922    if (bs) {
1923        bdrv_add_aio_context_notifier(bs, attached_aio_context,
1924                                      detach_aio_context, opaque);
1925    }
1926}
1927
1928void blk_remove_aio_context_notifier(BlockBackend *blk,
1929                                     void (*attached_aio_context)(AioContext *,
1930                                                                  void *),
1931                                     void (*detach_aio_context)(void *),
1932                                     void *opaque)
1933{
1934    BlockBackendAioNotifier *notifier;
1935    BlockDriverState *bs = blk_bs(blk);
1936
1937    if (bs) {
1938        bdrv_remove_aio_context_notifier(bs, attached_aio_context,
1939                                         detach_aio_context, opaque);
1940    }
1941
1942    QLIST_FOREACH(notifier, &blk->aio_notifiers, list) {
1943        if (notifier->attached_aio_context == attached_aio_context &&
1944            notifier->detach_aio_context == detach_aio_context &&
1945            notifier->opaque == opaque) {
1946            QLIST_REMOVE(notifier, list);
1947            g_free(notifier);
1948            return;
1949        }
1950    }
1951
1952    abort();
1953}
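/*
 * Editor's sketch: the two notifier callbacks are registered and removed as
 * a pair, together with the same opaque pointer; otherwise the abort()
 * above triggers. my_attached/my_detach/my_state are hypothetical names.
 *
 *     static void my_attached(AioContext *new_context, void *opaque) { ... }
 *     static void my_detach(void *opaque) { ... }
 *
 *     blk_add_aio_context_notifier(blk, my_attached, my_detach, my_state);
 *     ...
 *     blk_remove_aio_context_notifier(blk, my_attached, my_detach, my_state);
 */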
1954
1955void blk_add_remove_bs_notifier(BlockBackend *blk, Notifier *notify)
1956{
1957    notifier_list_add(&blk->remove_bs_notifiers, notify);
1958}
1959
1960void blk_add_insert_bs_notifier(BlockBackend *blk, Notifier *notify)
1961{
1962    notifier_list_add(&blk->insert_bs_notifiers, notify);
1963}
1964
1965void blk_io_plug(BlockBackend *blk)
1966{
1967    BlockDriverState *bs = blk_bs(blk);
1968
1969    if (bs) {
1970        bdrv_io_plug(bs);
1971    }
1972}
1973
1974void blk_io_unplug(BlockBackend *blk)
1975{
1976    BlockDriverState *bs = blk_bs(blk);
1977
1978    if (bs) {
1979        bdrv_io_unplug(bs);
1980    }
1981}
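/*
 * Editor's sketch: plug/unplug bracket a batch of asynchronous submissions
 * so the underlying driver can submit them in one go. The blk_aio_pwritev()
 * call and its arguments are illustrative; only the bracketing pattern is
 * the point here.
 *
 *     blk_io_plug(blk);
 *     for (i = 0; i < n; i++) {
 *         blk_aio_pwritev(blk, offset[i], qiov[i], 0, my_cb, my_opaque);
 *     }
 *     blk_io_unplug(blk);
 */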
1982
1983BlockAcctStats *blk_get_stats(BlockBackend *blk)
1984{
1985    return &blk->stats;
1986}
1987
1988void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
1989                  BlockCompletionFunc *cb, void *opaque)
1990{
1991    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
1992}
1993
1994int coroutine_fn blk_co_pwrite_zeroes(BlockBackend *blk, int64_t offset,
1995                                      int bytes, BdrvRequestFlags flags)
1996{
1997    return blk_co_pwritev(blk, offset, bytes, NULL,
1998                          flags | BDRV_REQ_ZERO_WRITE);
1999}
2000
2001int blk_pwrite_compressed(BlockBackend *blk, int64_t offset, const void *buf,
2002                          int count)
2003{
2004    return blk_prw(blk, offset, (void *) buf, count, blk_write_entry,
2005                   BDRV_REQ_WRITE_COMPRESSED);
2006}
2007
2008int blk_truncate(BlockBackend *blk, int64_t offset, PreallocMode prealloc,
2009                 Error **errp)
2010{
2011    if (!blk_is_available(blk)) {
2012        error_setg(errp, "No medium inserted");
2013        return -ENOMEDIUM;
2014    }
2015
2016    return bdrv_truncate(blk->root, offset, prealloc, errp);
2017}
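/*
 * Editor's sketch: a typical resize call with error propagation. "new_size"
 * is a hypothetical length in bytes; PREALLOC_MODE_OFF requests no
 * preallocation of the grown area.
 *
 *     Error *local_err = NULL;
 *     int ret = blk_truncate(blk, new_size, PREALLOC_MODE_OFF, &local_err);
 *     if (ret < 0) {
 *         error_report_err(local_err);
 *     }
 */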
2018
2019static void blk_pdiscard_entry(void *opaque)
2020{
2021    BlkRwCo *rwco = opaque;
2022    QEMUIOVector *qiov = rwco->iobuf;
2023
2024    rwco->ret = blk_co_pdiscard(rwco->blk, rwco->offset, qiov->size);
2025    aio_wait_kick();
2026}
2027
2028int blk_pdiscard(BlockBackend *blk, int64_t offset, int bytes)
2029{
2030    return blk_prw(blk, offset, NULL, bytes, blk_pdiscard_entry, 0);
2031}
2032
2033int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
2034                     int64_t pos, int size)
2035{
2036    int ret;
2037
2038    if (!blk_is_available(blk)) {
2039        return -ENOMEDIUM;
2040    }
2041
2042    ret = bdrv_save_vmstate(blk_bs(blk), buf, pos, size);
2043    if (ret < 0) {
2044        return ret;
2045    }
2046
2047    if (ret == size && !blk->enable_write_cache) {
2048        ret = bdrv_flush(blk_bs(blk));
2049    }
2050
2051    return ret < 0 ? ret : size;
2052}
2053
2054int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
2055{
2056    if (!blk_is_available(blk)) {
2057        return -ENOMEDIUM;
2058    }
2059
2060    return bdrv_load_vmstate(blk_bs(blk), buf, pos, size);
2061}
2062
2063int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
2064{
2065    if (!blk_is_available(blk)) {
2066        return -ENOMEDIUM;
2067    }
2068
2069    return bdrv_probe_blocksizes(blk_bs(blk), bsz);
2070}
2071
2072int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
2073{
2074    if (!blk_is_available(blk)) {
2075        return -ENOMEDIUM;
2076    }
2077
2078    return bdrv_probe_geometry(blk_bs(blk), geo);
2079}
2080
2081/*
2082 * Updates the BlockBackendRootState object with data from the currently
2083 * attached BlockDriverState.
2084 */
2085void blk_update_root_state(BlockBackend *blk)
2086{
2087    assert(blk->root);
2088
2089    blk->root_state.open_flags    = blk->root->bs->open_flags;
2090    blk->root_state.read_only     = blk->root->bs->read_only;
2091    blk->root_state.detect_zeroes = blk->root->bs->detect_zeroes;
2092}
2093
2094/*
2095 * Returns the detect-zeroes setting to be used for bdrv_open() of a
2096 * BlockDriverState which is supposed to inherit the root state.
2097 */
2098bool blk_get_detect_zeroes_from_root_state(BlockBackend *blk)
2099{
2100    return blk->root_state.detect_zeroes;
2101}
2102
2103/*
2104 * Returns the flags to be used for bdrv_open() of a BlockDriverState which is
2105 * supposed to inherit the root state.
2106 */
2107int blk_get_open_flags_from_root_state(BlockBackend *blk)
2108{
2109    int bs_flags;
2110
2111    bs_flags = blk->root_state.read_only ? 0 : BDRV_O_RDWR;
2112    bs_flags |= blk->root_state.open_flags & ~BDRV_O_RDWR;
2113
2114    return bs_flags;
2115}
2116
2117BlockBackendRootState *blk_get_root_state(BlockBackend *blk)
2118{
2119    return &blk->root_state;
2120}
2121
2122int blk_commit_all(void)
2123{
2124    BlockBackend *blk = NULL;
2125
2126    while ((blk = blk_all_next(blk)) != NULL) {
2127        AioContext *aio_context = blk_get_aio_context(blk);
2128
2129        aio_context_acquire(aio_context);
2130        if (blk_is_inserted(blk) && blk->root->bs->backing) {
2131            int ret = bdrv_commit(blk->root->bs);
2132            if (ret < 0) {
2133                aio_context_release(aio_context);
2134                return ret;
2135            }
2136        }
2137        aio_context_release(aio_context);
2138    }
2139    return 0;
2140}
2141
2142
2143/* throttling disk I/O limits */
2144void blk_set_io_limits(BlockBackend *blk, ThrottleConfig *cfg)
2145{
2146    throttle_group_config(&blk->public.throttle_group_member, cfg);
2147}
2148
2149void blk_io_limits_disable(BlockBackend *blk)
2150{
2151    BlockDriverState *bs = blk_bs(blk);
2152    ThrottleGroupMember *tgm = &blk->public.throttle_group_member;
2153    assert(tgm->throttle_state);
2154    if (bs) {
2155        bdrv_drained_begin(bs);
2156    }
2157    throttle_group_unregister_tgm(tgm);
2158    if (bs) {
2159        bdrv_drained_end(bs);
2160    }
2161}
2162
2163/* Should be called before blk_set_io_limits() if a limit is to be set */
2164void blk_io_limits_enable(BlockBackend *blk, const char *group)
2165{
2166    assert(!blk->public.throttle_group_member.throttle_state);
2167    throttle_group_register_tgm(&blk->public.throttle_group_member,
2168                                group, blk_get_aio_context(blk));
2169}
2170
2171void blk_io_limits_update_group(BlockBackend *blk, const char *group)
2172{
2173    /* this BB is not part of any group */
2174    if (!blk->public.throttle_group_member.throttle_state) {
2175        return;
2176    }
2177
2178    /* this BB is part of the same group as the one we want */
2179    if (!g_strcmp0(throttle_group_get_name(&blk->public.throttle_group_member),
2180                group)) {
2181        return;
2182    }
2183
2184    /* need to change the group this BB belongs to */
2185    blk_io_limits_disable(blk);
2186    blk_io_limits_enable(blk, group);
2187}
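/*
 * Editor's sketch: per the comment above blk_io_limits_enable(), a backend
 * joins a throttle group first and the limits are applied afterwards. The
 * group name and the 100 IOPS figure are made up for illustration.
 *
 *     ThrottleConfig cfg;
 *     throttle_config_init(&cfg);
 *     cfg.buckets[THROTTLE_OPS_TOTAL].avg = 100;
 *     blk_io_limits_enable(blk, "group0");
 *     blk_set_io_limits(blk, &cfg);
 */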
2188
2189static void blk_root_drained_begin(BdrvChild *child)
2190{
2191    BlockBackend *blk = child->opaque;
2192
2193    if (++blk->quiesce_counter == 1) {
2194        if (blk->dev_ops && blk->dev_ops->drained_begin) {
2195            blk->dev_ops->drained_begin(blk->dev_opaque);
2196        }
2197    }
2198
2199    /* Note that blk->root may not be accessible here yet if we are just
2200     * attaching to a BlockDriverState that is drained. Use child instead. */
2201
2202    if (atomic_fetch_inc(&blk->public.throttle_group_member.io_limits_disabled) == 0) {
2203        throttle_group_restart_tgm(&blk->public.throttle_group_member);
2204    }
2205}
2206
2207static bool blk_root_drained_poll(BdrvChild *child)
2208{
2209    BlockBackend *blk = child->opaque;
2210    assert(blk->quiesce_counter);
2211    return !!blk->in_flight;
2212}
2213
2214static void blk_root_drained_end(BdrvChild *child)
2215{
2216    BlockBackend *blk = child->opaque;
2217    assert(blk->quiesce_counter);
2218
2219    assert(blk->public.throttle_group_member.io_limits_disabled);
2220    atomic_dec(&blk->public.throttle_group_member.io_limits_disabled);
2221
2222    if (--blk->quiesce_counter == 0) {
2223        if (blk->dev_ops && blk->dev_ops->drained_end) {
2224            blk->dev_ops->drained_end(blk->dev_opaque);
2225        }
2226    }
2227}
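/*
 * Editor's sketch: the drained_begin/drained_end hooks invoked above are
 * supplied by the attached device model through its BlockDevOps (callback
 * and opaque names here are hypothetical):
 *
 *     static const BlockDevOps my_dev_ops = {
 *         .drained_begin = my_drained_begin,
 *         .drained_end   = my_drained_end,
 *     };
 *     blk_set_dev_ops(blk, &my_dev_ops, my_opaque);
 */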
2228
2229void blk_register_buf(BlockBackend *blk, void *host, size_t size)
2230{
2231    bdrv_register_buf(blk_bs(blk), host, size);
2232}
2233
2234void blk_unregister_buf(BlockBackend *blk, void *host)
2235{
2236    bdrv_unregister_buf(blk_bs(blk), host);
2237}
2238
2239int coroutine_fn blk_co_copy_range(BlockBackend *blk_in, int64_t off_in,
2240                                   BlockBackend *blk_out, int64_t off_out,
2241                                   int bytes, BdrvRequestFlags read_flags,
2242                                   BdrvRequestFlags write_flags)
2243{
2244    int r;
2245    r = blk_check_byte_request(blk_in, off_in, bytes);
2246    if (r) {
2247        return r;
2248    }
2249    r = blk_check_byte_request(blk_out, off_out, bytes);
2250    if (r) {
2251        return r;
2252    }
2253    return bdrv_co_copy_range(blk_in->root, off_in,
2254                              blk_out->root, off_out,
2255                              bytes, read_flags, write_flags);
2256}
2257
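/*
 * Editor's sketch: blk_co_copy_range() must run in coroutine context. A
 * caller copying "bytes" bytes from the start of one backend to the start
 * of another, with no special request flags, might look like this:
 *
 *     ret = blk_co_copy_range(blk_in, 0, blk_out, 0, bytes, 0, 0);
 *     if (ret < 0) {
 *         ... handle error ...
 *     }
 */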