qemu/block/block-backend.c
/*
 * QEMU Block backends
 *
 * Copyright (C) 2014 Red Hat, Inc.
 *
 * Authors:
 *  Markus Armbruster <armbru@redhat.com>,
 *
 * This work is licensed under the terms of the GNU LGPL, version 2.1
 * or later.  See the COPYING.LIB file in the top-level directory.
 */

#include "sysemu/block-backend.h"
#include "block/block_int.h"
#include "sysemu/blockdev.h"
#include "qapi-event.h"

/* Number of coroutines to reserve per attached device model */
#define COROUTINE_POOL_RESERVATION 64

struct BlockBackend {
    char *name;
    int refcnt;
    BlockDriverState *bs;
    DriveInfo *legacy_dinfo;    /* null unless created by drive_new() */
    QTAILQ_ENTRY(BlockBackend) link; /* for blk_backends */

    void *dev;                  /* attached device model, if any */
    /* TODO change to DeviceState when all users are qdevified */
    const BlockDevOps *dev_ops;
    void *dev_opaque;
};

typedef struct BlockBackendAIOCB {
    BlockAIOCB common;
    QEMUBH *bh;
    int ret;
} BlockBackendAIOCB;

static const AIOCBInfo block_backend_aiocb_info = {
    .aiocb_size = sizeof(BlockBackendAIOCB),
};

static void drive_info_del(DriveInfo *dinfo);

/* All the BlockBackends (except for hidden ones) */
static QTAILQ_HEAD(, BlockBackend) blk_backends =
    QTAILQ_HEAD_INITIALIZER(blk_backends);

/*
 * Create a new BlockBackend with @name, with a reference count of one.
 * @name must not be null or empty.
 * Fail if a BlockBackend with this name already exists.
 * Store an error through @errp on failure, unless it's null.
 * Return the new BlockBackend on success, null on failure.
 */
BlockBackend *blk_new(const char *name, Error **errp)
{
    BlockBackend *blk;

    assert(name && name[0]);
    if (!id_wellformed(name)) {
        error_setg(errp, "Invalid device name");
        return NULL;
    }
    if (blk_by_name(name)) {
        error_setg(errp, "Device with id '%s' already exists", name);
        return NULL;
    }
    if (bdrv_find_node(name)) {
        error_setg(errp,
                   "Device name '%s' conflicts with an existing node name",
                   name);
        return NULL;
    }

    blk = g_new0(BlockBackend, 1);
    blk->name = g_strdup(name);
    blk->refcnt = 1;
    QTAILQ_INSERT_TAIL(&blk_backends, blk, link);
    return blk;
}

/*
 * Create a new BlockBackend with a new BlockDriverState attached.
 * Otherwise just like blk_new(), which see.
 */
BlockBackend *blk_new_with_bs(const char *name, Error **errp)
{
    BlockBackend *blk;
    BlockDriverState *bs;

    blk = blk_new(name, errp);
    if (!blk) {
        return NULL;
    }

    bs = bdrv_new_root();
    blk->bs = bs;
    bs->blk = blk;
    return blk;
}

/*
 * Calls blk_new_with_bs() and then calls bdrv_open() on the BlockDriverState.
 *
 * Just as with bdrv_open(), after having called this function the reference to
 * @options belongs to the block layer (even on failure).
 *
 * TODO: Remove @filename and @flags; it should be possible to specify a whole
 * BDS tree just by specifying the @options QDict (or @reference,
 * alternatively). At the time of adding this function, this is not possible,
 * though, so callers of this function have to be able to specify @filename and
 * @flags.
 */
BlockBackend *blk_new_open(const char *name, const char *filename,
                           const char *reference, QDict *options, int flags,
                           Error **errp)
{
    BlockBackend *blk;
    int ret;

    blk = blk_new_with_bs(name, errp);
    if (!blk) {
        QDECREF(options);
        return NULL;
    }

    ret = bdrv_open(&blk->bs, filename, reference, options, flags, NULL, errp);
    if (ret < 0) {
        blk_unref(blk);
        return NULL;
    }

    return blk;
}

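/*
 * Usage sketch (hypothetical, not part of this file): create and open
 * an image as a named backend.  The backend name, file name, and flags
 * below are illustrative assumptions.
 *
 *     Error *local_err = NULL;
 *     BlockBackend *blk = blk_new_open("disk0", "disk.qcow2", NULL, NULL,
 *                                      BDRV_O_RDWR, &local_err);
 *     if (!blk) {
 *         error_report_err(local_err);   // report and free the error
 *     }
 */
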
static void blk_delete(BlockBackend *blk)
{
    assert(!blk->refcnt);
    assert(!blk->dev);
    if (blk->bs) {
        assert(blk->bs->blk == blk);
        blk->bs->blk = NULL;
        bdrv_unref(blk->bs);
        blk->bs = NULL;
    }
    /* Avoid double-remove after blk_hide_on_behalf_of_hmp_drive_del() */
    if (blk->name[0]) {
        QTAILQ_REMOVE(&blk_backends, blk, link);
    }
    g_free(blk->name);
    drive_info_del(blk->legacy_dinfo);
    g_free(blk);
}

static void drive_info_del(DriveInfo *dinfo)
{
    if (!dinfo) {
        return;
    }
    qemu_opts_del(dinfo->opts);
    g_free(dinfo->serial);
    g_free(dinfo);
}

/*
 * Increment @blk's reference count.
 * @blk must not be null.
 */
void blk_ref(BlockBackend *blk)
{
    blk->refcnt++;
}

/*
 * Decrement @blk's reference count.
 * If this drops it to zero, destroy @blk.
 * For convenience, do nothing if @blk is null.
 */
void blk_unref(BlockBackend *blk)
{
    if (blk) {
        assert(blk->refcnt > 0);
        if (!--blk->refcnt) {
            blk_delete(blk);
        }
    }
}

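/*
 * Usage sketch (hypothetical): a caller that keeps @blk beyond the
 * current function takes its own reference and drops it when done.
 *
 *     blk_ref(blk);       // keep blk alive while we hold the pointer
 *     ...
 *     blk_unref(blk);     // destroys blk if this was the last reference
 */
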
/*
 * Return the BlockBackend after @blk.
 * If @blk is null, return the first one.
 * Else, return @blk's next sibling, which may be null.
 *
 * To iterate over all BlockBackends, do
 * for (blk = blk_next(NULL); blk; blk = blk_next(blk)) {
 *     ...
 * }
 */
BlockBackend *blk_next(BlockBackend *blk)
{
    return blk ? QTAILQ_NEXT(blk, link) : QTAILQ_FIRST(&blk_backends);
}

/*
 * Return @blk's name, a non-null string.
 * Wart: the name is empty iff @blk has been hidden with
 * blk_hide_on_behalf_of_hmp_drive_del().
 */
const char *blk_name(BlockBackend *blk)
{
    return blk->name;
}

/*
 * Return the BlockBackend with name @name if it exists, else null.
 * @name must not be null.
 */
BlockBackend *blk_by_name(const char *name)
{
    BlockBackend *blk;

    assert(name);
    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (!strcmp(name, blk->name)) {
            return blk;
        }
    }
    return NULL;
}

/*
 * Return the BlockDriverState attached to @blk if any, else null.
 */
BlockDriverState *blk_bs(BlockBackend *blk)
{
    return blk->bs;
}

/*
 * Return @blk's DriveInfo if any, else null.
 */
DriveInfo *blk_legacy_dinfo(BlockBackend *blk)
{
    return blk->legacy_dinfo;
}

/*
 * Set @blk's DriveInfo to @dinfo, and return it.
 * @blk must not have a DriveInfo set already.
 * No other BlockBackend may have the same DriveInfo set.
 */
DriveInfo *blk_set_legacy_dinfo(BlockBackend *blk, DriveInfo *dinfo)
{
    assert(!blk->legacy_dinfo);
    return blk->legacy_dinfo = dinfo;
}

/*
 * Return the BlockBackend with DriveInfo @dinfo.
 * It must exist.
 */
BlockBackend *blk_by_legacy_dinfo(DriveInfo *dinfo)
{
    BlockBackend *blk;

    QTAILQ_FOREACH(blk, &blk_backends, link) {
        if (blk->legacy_dinfo == dinfo) {
            return blk;
        }
    }
    abort();
}

/*
 * Hide @blk.
 * @blk must not have been hidden already.
 * Make attached BlockDriverState, if any, anonymous.
 * Once hidden, @blk is invisible to all functions that don't receive
 * it as argument.  For example, blk_by_name() won't return it.
 * Strictly for use by do_drive_del().
 * TODO get rid of it!
 */
void blk_hide_on_behalf_of_hmp_drive_del(BlockBackend *blk)
{
    QTAILQ_REMOVE(&blk_backends, blk, link);
    blk->name[0] = 0;
    if (blk->bs) {
        bdrv_make_anon(blk->bs);
    }
}

/*
 * Attach device model @dev to @blk.
 * Return 0 on success, -EBUSY when a device model is attached already.
 */
int blk_attach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    if (blk->dev) {
        return -EBUSY;
    }
    blk_ref(blk);
    blk->dev = dev;
    bdrv_iostatus_reset(blk->bs);
    return 0;
}

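/*
 * Usage sketch (hypothetical): a device model attaches itself once and
 * detaches when it goes away; blk_attach_dev() takes the reference that
 * blk_detach_dev() drops.
 *
 *     if (blk_attach_dev(blk, dev) < 0) {
 *         // -EBUSY: another device model is attached already
 *     }
 *     ...
 *     blk_detach_dev(blk, dev);
 */
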
/*
 * Attach device model @dev to @blk.
 * @blk must not have a device model attached already.
 * TODO qdevified devices don't use this, remove when devices are qdevified
 */
void blk_attach_dev_nofail(BlockBackend *blk, void *dev)
{
    if (blk_attach_dev(blk, dev) < 0) {
        abort();
    }
}

/*
 * Detach device model @dev from @blk.
 * @dev must be currently attached to @blk.
 */
void blk_detach_dev(BlockBackend *blk, void *dev)
/* TODO change to DeviceState *dev when all users are qdevified */
{
    assert(blk->dev == dev);
    blk->dev = NULL;
    blk->dev_ops = NULL;
    blk->dev_opaque = NULL;
    bdrv_set_guest_block_size(blk->bs, 512);
    blk_unref(blk);
}

/*
 * Return the device model attached to @blk if any, else null.
 */
void *blk_get_attached_dev(BlockBackend *blk)
/* TODO change to return DeviceState * when all users are qdevified */
{
    return blk->dev;
}

/*
 * Set @blk's device model callbacks to @ops.
 * @opaque is the opaque argument to pass to the callbacks.
 * This is for use by device models.
 */
void blk_set_dev_ops(BlockBackend *blk, const BlockDevOps *ops,
                     void *opaque)
{
    blk->dev_ops = ops;
    blk->dev_opaque = opaque;
}

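/*
 * Usage sketch (hypothetical): a removable-media device model wires up
 * its callbacks right after attaching.  The my_cdrom_* names are
 * illustrative only.
 *
 *     static const BlockDevOps my_cdrom_ops = {
 *         .change_media_cb  = my_cdrom_change_media,
 *         .eject_request_cb = my_cdrom_eject_request,
 *         .is_tray_open     = my_cdrom_is_tray_open,
 *     };
 *
 *     blk_set_dev_ops(blk, &my_cdrom_ops, my_cdrom_state);
 */
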
/*
 * Notify @blk's attached device model of media change.
 * If @load is true, notify of media load.
 * Else, notify of media eject.
 * Also send DEVICE_TRAY_MOVED events as appropriate.
 */
void blk_dev_change_media_cb(BlockBackend *blk, bool load)
{
    if (blk->dev_ops && blk->dev_ops->change_media_cb) {
        bool tray_was_closed = !blk_dev_is_tray_open(blk);

        blk->dev_ops->change_media_cb(blk->dev_opaque, load);
        if (tray_was_closed) {
            /* tray open */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              true, &error_abort);
        }
        if (load) {
            /* tray close */
            qapi_event_send_device_tray_moved(blk_name(blk),
                                              false, &error_abort);
        }
    }
}

/*
 * Does @blk's attached device model have removable media?
 * %true if no device model is attached.
 */
bool blk_dev_has_removable_media(BlockBackend *blk)
{
    return !blk->dev || (blk->dev_ops && blk->dev_ops->change_media_cb);
}

/*
 * Notify @blk's attached device model of a media eject request.
 * If @force is true, the medium is about to be yanked out forcefully.
 */
void blk_dev_eject_request(BlockBackend *blk, bool force)
{
    if (blk->dev_ops && blk->dev_ops->eject_request_cb) {
        blk->dev_ops->eject_request_cb(blk->dev_opaque, force);
    }
}

/*
 * Does @blk's attached device model have a tray, and is it open?
 */
bool blk_dev_is_tray_open(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_tray_open) {
        return blk->dev_ops->is_tray_open(blk->dev_opaque);
    }
    return false;
}

/*
 * Does @blk's attached device model have the medium locked?
 * %false if the device model has no such lock.
 */
bool blk_dev_is_medium_locked(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->is_medium_locked) {
        return blk->dev_ops->is_medium_locked(blk->dev_opaque);
    }
    return false;
}

/*
 * Notify @blk's attached device model of a backend size change.
 */
void blk_dev_resize_cb(BlockBackend *blk)
{
    if (blk->dev_ops && blk->dev_ops->resize_cb) {
        blk->dev_ops->resize_cb(blk->dev_opaque);
    }
}

void blk_iostatus_enable(BlockBackend *blk)
{
    bdrv_iostatus_enable(blk->bs);
}

static int blk_check_byte_request(BlockBackend *blk, int64_t offset,
                                  size_t size)
{
    int64_t len;

    if (size > INT_MAX) {
        return -EIO;
    }

    if (!blk_is_inserted(blk)) {
        return -ENOMEDIUM;
    }

    len = blk_getlength(blk);
    if (len < 0) {
        return len;
    }

    if (offset < 0) {
        return -EIO;
    }

    if (offset > len || len - offset < size) {
        return -EIO;
    }

    return 0;
}

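/*
 * Note on the final bounds check above: "offset > len || len - offset <
 * size" rejects exactly the requests where offset + size would exceed
 * len, but without computing offset + size, which could overflow.
 * len - offset is safe because it is only evaluated once we know
 * 0 <= offset <= len.
 */
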
static int blk_check_request(BlockBackend *blk, int64_t sector_num,
                             int nb_sectors)
{
    if (sector_num < 0 || sector_num > INT64_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    if (nb_sectors < 0 || nb_sectors > INT_MAX / BDRV_SECTOR_SIZE) {
        return -EIO;
    }

    return blk_check_byte_request(blk, sector_num * BDRV_SECTOR_SIZE,
                                  nb_sectors * BDRV_SECTOR_SIZE);
}

int blk_read(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
             int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read(blk->bs, sector_num, buf, nb_sectors);
}

int blk_read_unthrottled(BlockBackend *blk, int64_t sector_num, uint8_t *buf,
                         int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_read_unthrottled(blk->bs, sector_num, buf, nb_sectors);
}

int blk_write(BlockBackend *blk, int64_t sector_num, const uint8_t *buf,
              int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write(blk->bs, sector_num, buf, nb_sectors);
}

static void error_callback_bh(void *opaque)
{
    struct BlockBackendAIOCB *acb = opaque;
    qemu_bh_delete(acb->bh);
    acb->common.cb(acb->common.opaque, acb->ret);
    qemu_aio_unref(acb);
}

static BlockAIOCB *abort_aio_request(BlockBackend *blk, BlockCompletionFunc *cb,
                                     void *opaque, int ret)
{
    struct BlockBackendAIOCB *acb;
    QEMUBH *bh;

    acb = blk_aio_get(&block_backend_aiocb_info, blk, cb, opaque);
    acb->ret = ret;

    bh = aio_bh_new(blk_get_aio_context(blk), error_callback_bh, acb);
    acb->bh = bh;
    qemu_bh_schedule(bh);

    return &acb->common;
}

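/*
 * Note: abort_aio_request() reports the error through a bottom half
 * instead of calling @cb directly, so that even a request that fails
 * its checks completes asynchronously, after the caller has received
 * the BlockAIOCB, as users of the AIO interface expect.
 */
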
BlockAIOCB *blk_aio_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                 int nb_sectors, BdrvRequestFlags flags,
                                 BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_write_zeroes(blk->bs, sector_num, nb_sectors, flags,
                                 cb, opaque);
}

int blk_pread(BlockBackend *blk, int64_t offset, void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pread(blk->bs, offset, buf, count);
}

int blk_pwrite(BlockBackend *blk, int64_t offset, const void *buf, int count)
{
    int ret = blk_check_byte_request(blk, offset, count);
    if (ret < 0) {
        return ret;
    }

    return bdrv_pwrite(blk->bs, offset, buf, count);
}

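/*
 * Usage sketch (hypothetical): read and rewrite the first 512 bytes
 * synchronously with the byte-level helpers above.
 *
 *     uint8_t buf[512];
 *
 *     if (blk_pread(blk, 0, buf, sizeof(buf)) < 0) {
 *         // negative return is -errno
 *     }
 *     buf[0] ^= 1;
 *     if (blk_pwrite(blk, 0, buf, sizeof(buf)) < 0) {
 *         // handle the write error
 *     }
 */
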
int64_t blk_getlength(BlockBackend *blk)
{
    return bdrv_getlength(blk->bs);
}

void blk_get_geometry(BlockBackend *blk, uint64_t *nb_sectors_ptr)
{
    bdrv_get_geometry(blk->bs, nb_sectors_ptr);
}

int64_t blk_nb_sectors(BlockBackend *blk)
{
    return bdrv_nb_sectors(blk->bs);
}

BlockAIOCB *blk_aio_readv(BlockBackend *blk, int64_t sector_num,
                          QEMUIOVector *iov, int nb_sectors,
                          BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_readv(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

BlockAIOCB *blk_aio_writev(BlockBackend *blk, int64_t sector_num,
                           QEMUIOVector *iov, int nb_sectors,
                           BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_writev(blk->bs, sector_num, iov, nb_sectors, cb, opaque);
}

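/*
 * Usage sketch (hypothetical): submit an asynchronous one-sector read.
 * my_read_done, its opaque argument, and buf (an allocated buffer of at
 * least BDRV_SECTOR_SIZE bytes) are illustrative only.
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         // ret is 0 on success, -errno on failure
 *     }
 *
 *     QEMUIOVector qiov;
 *     qemu_iovec_init(&qiov, 1);
 *     qemu_iovec_add(&qiov, buf, BDRV_SECTOR_SIZE);
 *     blk_aio_readv(blk, 0, &qiov, 1, my_read_done, NULL);
 */
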
BlockAIOCB *blk_aio_flush(BlockBackend *blk,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_flush(blk->bs, cb, opaque);
}

BlockAIOCB *blk_aio_discard(BlockBackend *blk,
                            int64_t sector_num, int nb_sectors,
                            BlockCompletionFunc *cb, void *opaque)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return abort_aio_request(blk, cb, opaque, ret);
    }

    return bdrv_aio_discard(blk->bs, sector_num, nb_sectors, cb, opaque);
}

void blk_aio_cancel(BlockAIOCB *acb)
{
    bdrv_aio_cancel(acb);
}

void blk_aio_cancel_async(BlockAIOCB *acb)
{
    bdrv_aio_cancel_async(acb);
}

int blk_aio_multiwrite(BlockBackend *blk, BlockRequest *reqs, int num_reqs)
{
    int i, ret;

    for (i = 0; i < num_reqs; i++) {
        ret = blk_check_request(blk, reqs[i].sector, reqs[i].nb_sectors);
        if (ret < 0) {
            return ret;
        }
    }

    return bdrv_aio_multiwrite(blk->bs, reqs, num_reqs);
}

int blk_ioctl(BlockBackend *blk, unsigned long int req, void *buf)
{
    return bdrv_ioctl(blk->bs, req, buf);
}

BlockAIOCB *blk_aio_ioctl(BlockBackend *blk, unsigned long int req, void *buf,
                          BlockCompletionFunc *cb, void *opaque)
{
    return bdrv_aio_ioctl(blk->bs, req, buf, cb, opaque);
}

int blk_co_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_discard(blk->bs, sector_num, nb_sectors);
}

int blk_co_flush(BlockBackend *blk)
{
    return bdrv_co_flush(blk->bs);
}

int blk_flush(BlockBackend *blk)
{
    return bdrv_flush(blk->bs);
}

int blk_flush_all(void)
{
    return bdrv_flush_all();
}

void blk_drain_all(void)
{
    bdrv_drain_all();
}

BlockdevOnError blk_get_on_error(BlockBackend *blk, bool is_read)
{
    return bdrv_get_on_error(blk->bs, is_read);
}

BlockErrorAction blk_get_error_action(BlockBackend *blk, bool is_read,
                                      int error)
{
    return bdrv_get_error_action(blk->bs, is_read, error);
}

void blk_error_action(BlockBackend *blk, BlockErrorAction action,
                      bool is_read, int error)
{
    bdrv_error_action(blk->bs, action, is_read, error);
}

int blk_is_read_only(BlockBackend *blk)
{
    return bdrv_is_read_only(blk->bs);
}

int blk_is_sg(BlockBackend *blk)
{
    return bdrv_is_sg(blk->bs);
}

int blk_enable_write_cache(BlockBackend *blk)
{
    return bdrv_enable_write_cache(blk->bs);
}

void blk_set_enable_write_cache(BlockBackend *blk, bool wce)
{
    bdrv_set_enable_write_cache(blk->bs, wce);
}

void blk_invalidate_cache(BlockBackend *blk, Error **errp)
{
    bdrv_invalidate_cache(blk->bs, errp);
}

int blk_is_inserted(BlockBackend *blk)
{
    return bdrv_is_inserted(blk->bs);
}

void blk_lock_medium(BlockBackend *blk, bool locked)
{
    bdrv_lock_medium(blk->bs, locked);
}

void blk_eject(BlockBackend *blk, bool eject_flag)
{
    bdrv_eject(blk->bs, eject_flag);
}

int blk_get_flags(BlockBackend *blk)
{
    return bdrv_get_flags(blk->bs);
}

int blk_get_max_transfer_length(BlockBackend *blk)
{
    return blk->bs->bl.max_transfer_length;
}

void blk_set_guest_block_size(BlockBackend *blk, int align)
{
    bdrv_set_guest_block_size(blk->bs, align);
}

void *blk_blockalign(BlockBackend *blk, size_t size)
{
    return qemu_blockalign(blk ? blk->bs : NULL, size);
}

bool blk_op_is_blocked(BlockBackend *blk, BlockOpType op, Error **errp)
{
    return bdrv_op_is_blocked(blk->bs, op, errp);
}

void blk_op_unblock(BlockBackend *blk, BlockOpType op, Error *reason)
{
    bdrv_op_unblock(blk->bs, op, reason);
}

void blk_op_block_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_block_all(blk->bs, reason);
}

void blk_op_unblock_all(BlockBackend *blk, Error *reason)
{
    bdrv_op_unblock_all(blk->bs, reason);
}

AioContext *blk_get_aio_context(BlockBackend *blk)
{
    return bdrv_get_aio_context(blk->bs);
}

void blk_set_aio_context(BlockBackend *blk, AioContext *new_context)
{
    bdrv_set_aio_context(blk->bs, new_context);
}

void blk_add_aio_context_notifier(BlockBackend *blk,
        void (*attached_aio_context)(AioContext *new_context, void *opaque),
        void (*detach_aio_context)(void *opaque), void *opaque)
{
    bdrv_add_aio_context_notifier(blk->bs, attached_aio_context,
                                  detach_aio_context, opaque);
}

void blk_remove_aio_context_notifier(BlockBackend *blk,
                                     void (*attached_aio_context)(AioContext *,
                                                                  void *),
                                     void (*detach_aio_context)(void *),
                                     void *opaque)
{
    bdrv_remove_aio_context_notifier(blk->bs, attached_aio_context,
                                     detach_aio_context, opaque);
}

void blk_add_close_notifier(BlockBackend *blk, Notifier *notify)
{
    bdrv_add_close_notifier(blk->bs, notify);
}

void blk_io_plug(BlockBackend *blk)
{
    bdrv_io_plug(blk->bs);
}

void blk_io_unplug(BlockBackend *blk)
{
    bdrv_io_unplug(blk->bs);
}

BlockAcctStats *blk_get_stats(BlockBackend *blk)
{
    return bdrv_get_stats(blk->bs);
}

void *blk_aio_get(const AIOCBInfo *aiocb_info, BlockBackend *blk,
                  BlockCompletionFunc *cb, void *opaque)
{
    return qemu_aio_get(aiocb_info, blk_bs(blk), cb, opaque);
}

int coroutine_fn blk_co_write_zeroes(BlockBackend *blk, int64_t sector_num,
                                     int nb_sectors, BdrvRequestFlags flags)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_co_write_zeroes(blk->bs, sector_num, nb_sectors, flags);
}

int blk_write_compressed(BlockBackend *blk, int64_t sector_num,
                         const uint8_t *buf, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_write_compressed(blk->bs, sector_num, buf, nb_sectors);
}

int blk_truncate(BlockBackend *blk, int64_t offset)
{
    return bdrv_truncate(blk->bs, offset);
}

int blk_discard(BlockBackend *blk, int64_t sector_num, int nb_sectors)
{
    int ret = blk_check_request(blk, sector_num, nb_sectors);
    if (ret < 0) {
        return ret;
    }

    return bdrv_discard(blk->bs, sector_num, nb_sectors);
}

int blk_save_vmstate(BlockBackend *blk, const uint8_t *buf,
                     int64_t pos, int size)
{
    return bdrv_save_vmstate(blk->bs, buf, pos, size);
}

int blk_load_vmstate(BlockBackend *blk, uint8_t *buf, int64_t pos, int size)
{
    return bdrv_load_vmstate(blk->bs, buf, pos, size);
}

int blk_probe_blocksizes(BlockBackend *blk, BlockSizes *bsz)
{
    return bdrv_probe_blocksizes(blk->bs, bsz);
}

int blk_probe_geometry(BlockBackend *blk, HDGeometry *geo)
{
    return bdrv_probe_geometry(blk->bs, geo);
}