qemu/include/block/block.h
#ifndef BLOCK_H
#define BLOCK_H

#include "block/aio.h"
#include "qemu-common.h"
#include "qemu/option.h"
#include "block/coroutine.h"
#include "qapi/qmp/qobject.h"
#include "qapi-types.h"

/* block.c */
typedef struct BlockDriver BlockDriver;
typedef struct BlockJob BlockJob;

typedef struct BlockDriverInfo {
    /* in bytes, 0 if irrelevant */
    int cluster_size;
    /* offset at which the VM state can be saved (0 if not possible) */
    int64_t vm_state_offset;
    bool is_dirty;
} BlockDriverInfo;

typedef struct BlockFragInfo {
    uint64_t allocated_clusters;
    uint64_t total_clusters;
    uint64_t fragmented_clusters;
    uint64_t compressed_clusters;
} BlockFragInfo;

/* Callbacks for block device models */
typedef struct BlockDevOps {
    /*
     * Runs when virtual media changed (monitor commands eject, change)
     * Argument load is true on load and false on eject.
     * Beware: doesn't run when a host device's physical media
     * changes.  Sure would be useful if it did.
     * Device models with removable media must implement this callback.
     */
    void (*change_media_cb)(void *opaque, bool load);
    /*
     * Runs when an eject request is issued from the monitor, the tray
     * is closed, and the medium is locked.
     * Device models that do not implement is_medium_locked will not need
     * this callback.  Device models that can lock the medium or tray might
     * want to implement the callback and unlock the tray when "force" is
     * true, even if they do not support eject requests.
     */
    void (*eject_request_cb)(void *opaque, bool force);
    /*
     * Is the virtual tray open?
     * Device models implement this only when the device has a tray.
     */
    bool (*is_tray_open)(void *opaque);
    /*
     * Is the virtual medium locked into the device?
     * Device models implement this only when the device has such a lock.
     */
    bool (*is_medium_locked)(void *opaque);
    /*
     * Runs when the size changed (e.g. monitor command block_resize)
     */
    void (*resize_cb)(void *opaque);
} BlockDevOps;

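/*
 * Example (illustrative sketch, not from the original header): a device
 * model with removable media might wire up these callbacks roughly like
 * this.  The names my_change_cb, my_device_ops and dev are hypothetical;
 * only bdrv_set_dev_ops(), declared further down, is part of this API.
 *
 *     static void my_change_cb(void *opaque, bool load)
 *     {
 *         // react to medium load/eject for the device behind 'opaque'
 *     }
 *
 *     static const BlockDevOps my_device_ops = {
 *         .change_media_cb = my_change_cb,
 *     };
 *
 *     // during device initialisation:
 *     //     bdrv_set_dev_ops(bs, &my_device_ops, dev);
 */
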
#define BDRV_O_RDWR        0x0002
#define BDRV_O_SNAPSHOT    0x0008 /* open the file read only and save writes in a snapshot */
#define BDRV_O_NOCACHE     0x0020 /* do not use the host page cache */
#define BDRV_O_CACHE_WB    0x0040 /* use write-back caching */
#define BDRV_O_NATIVE_AIO  0x0080 /* use native AIO instead of the thread pool */
#define BDRV_O_NO_BACKING  0x0100 /* don't open the backing file */
#define BDRV_O_NO_FLUSH    0x0200 /* disable flushing on this disk */
#define BDRV_O_COPY_ON_READ 0x0400 /* copy read backing sectors into image */
#define BDRV_O_INCOMING    0x0800  /* consistency hint for incoming migration */
#define BDRV_O_CHECK       0x1000  /* open solely for consistency check */
#define BDRV_O_ALLOW_RDWR  0x2000  /* allow reopen to change from r/o to r/w */
#define BDRV_O_UNMAP       0x4000  /* execute guest UNMAP/TRIM operations */

#define BDRV_O_CACHE_MASK  (BDRV_O_NOCACHE | BDRV_O_CACHE_WB | BDRV_O_NO_FLUSH)

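/*
 * Example (sketch): the BDRV_O_* flags above are OR'ed together and passed
 * as the 'flags' argument of bdrv_open(), declared further down.  The
 * image name and variable names here are illustrative only.
 *
 *     int flags = BDRV_O_RDWR | BDRV_O_CACHE_WB;
 *     Error *local_err = NULL;
 *     BlockDriverState *bs = bdrv_new("drive0");
 *
 *     if (bdrv_open(bs, "disk.qcow2", NULL, flags, NULL, &local_err) < 0) {
 *         // report local_err, then drop the reference
 *         bdrv_unref(bs);
 *     }
 */
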
#define BDRV_SECTOR_BITS   9
#define BDRV_SECTOR_SIZE   (1ULL << BDRV_SECTOR_BITS)
#define BDRV_SECTOR_MASK   ~(BDRV_SECTOR_SIZE - 1)

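/*
 * Example (sketch): converting between byte offsets and the 512-byte
 * sectors used throughout this API; 'offset' and 'bytes' are illustrative
 * locals.
 *
 *     int64_t sector_num = offset >> BDRV_SECTOR_BITS;
 *     int nb_sectors     = bytes >> BDRV_SECTOR_BITS;
 *     int64_t aligned    = offset & BDRV_SECTOR_MASK;  // round down
 */
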
/* BDRV_BLOCK_DATA: data is read from bs->file or another file
 * BDRV_BLOCK_ZERO: sectors read as zero
 * BDRV_BLOCK_OFFSET_VALID: sector stored in bs->file as raw data
 * BDRV_BLOCK_RAW: used internally to indicate that the request
 *                 was answered by the raw driver and that one
 *                 should look in bs->file directly.
 *
 * If BDRV_BLOCK_OFFSET_VALID is set, bits 9-62 represent the offset in
 * bs->file where sector data can be read from as raw data.
 *
 * DATA == 0 && ZERO == 0 means that data is read from backing_hd if present.
 *
 * DATA ZERO OFFSET_VALID
 *  t    t        t       sectors read as zero, bs->file is zero at offset
 *  t    f        t       sectors read as valid from bs->file at offset
 *  f    t        t       sectors preallocated, read as zero, bs->file not
 *                        necessarily zero at offset
 *  f    f        t       sectors preallocated but read from backing_hd,
 *                        bs->file contains garbage at offset
 *  t    t        f       sectors preallocated, read as zero, unknown offset
 *  t    f        f       sectors read from unknown file or offset
 *  f    t        f       not allocated or unknown offset, read as zero
 *  f    f        f       not allocated or unknown offset, read from backing_hd
 */
#define BDRV_BLOCK_DATA         1
#define BDRV_BLOCK_ZERO         2
#define BDRV_BLOCK_OFFSET_VALID 4
#define BDRV_BLOCK_RAW          8
#define BDRV_BLOCK_OFFSET_MASK  BDRV_SECTOR_MASK

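/*
 * Example (sketch): interpreting the value returned by
 * bdrv_get_block_status(), declared further down.  'bs', 'sector_num',
 * 'nb_sectors' and the locals are illustrative.
 *
 *     int pnum;
 *     int64_t ret = bdrv_get_block_status(bs, sector_num, nb_sectors, &pnum);
 *
 *     if (ret < 0) {
 *         // error
 *     } else if (ret & BDRV_BLOCK_ZERO) {
 *         // the first 'pnum' sectors read as zero
 *     } else if (ret & BDRV_BLOCK_OFFSET_VALID) {
 *         int64_t host_offset = ret & BDRV_BLOCK_OFFSET_MASK;
 *         // raw data for these sectors lives in bs->file at host_offset
 *     }
 */
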
typedef enum {
    BDRV_ACTION_REPORT, BDRV_ACTION_IGNORE, BDRV_ACTION_STOP
} BlockErrorAction;

typedef QSIMPLEQ_HEAD(BlockReopenQueue, BlockReopenQueueEntry) BlockReopenQueue;

typedef struct BDRVReopenState {
    BlockDriverState *bs;
    int flags;
    void *opaque;
} BDRVReopenState;


void bdrv_iostatus_enable(BlockDriverState *bs);
void bdrv_iostatus_reset(BlockDriverState *bs);
void bdrv_iostatus_disable(BlockDriverState *bs);
bool bdrv_iostatus_is_enabled(const BlockDriverState *bs);
void bdrv_iostatus_set_err(BlockDriverState *bs, int error);
void bdrv_info_print(Monitor *mon, const QObject *data);
void bdrv_info(Monitor *mon, QObject **ret_data);
void bdrv_stats_print(Monitor *mon, const QObject *data);
void bdrv_info_stats(Monitor *mon, QObject **ret_data);

/* disk I/O throttling */
void bdrv_io_limits_enable(BlockDriverState *bs);
void bdrv_io_limits_disable(BlockDriverState *bs);

void bdrv_init(void);
void bdrv_init_with_whitelist(void);
BlockDriver *bdrv_find_protocol(const char *filename,
                                bool allow_protocol_prefix);
BlockDriver *bdrv_find_format(const char *format_name);
BlockDriver *bdrv_find_whitelisted_format(const char *format_name,
                                          bool readonly);
int bdrv_create(BlockDriver *drv, const char* filename,
    QEMUOptionParameter *options, Error **errp);
int bdrv_create_file(const char* filename, QEMUOptionParameter *options,
                     Error **errp);
BlockDriverState *bdrv_new(const char *device_name);
void bdrv_make_anon(BlockDriverState *bs);
void bdrv_swap(BlockDriverState *bs_new, BlockDriverState *bs_old);
void bdrv_append(BlockDriverState *bs_new, BlockDriverState *bs_top);
int bdrv_parse_cache_flags(const char *mode, int *flags);
int bdrv_parse_discard_flags(const char *mode, int *flags);
int bdrv_file_open(BlockDriverState **pbs, const char *filename,
                   QDict *options, int flags, Error **errp);
int bdrv_open_backing_file(BlockDriverState *bs, QDict *options, Error **errp);
int bdrv_open(BlockDriverState *bs, const char *filename, QDict *options,
              int flags, BlockDriver *drv, Error **errp);
BlockReopenQueue *bdrv_reopen_queue(BlockReopenQueue *bs_queue,
                                    BlockDriverState *bs, int flags);
int bdrv_reopen_multiple(BlockReopenQueue *bs_queue, Error **errp);
int bdrv_reopen(BlockDriverState *bs, int bdrv_flags, Error **errp);
int bdrv_reopen_prepare(BDRVReopenState *reopen_state,
                        BlockReopenQueue *queue, Error **errp);
void bdrv_reopen_commit(BDRVReopenState *reopen_state);
void bdrv_reopen_abort(BDRVReopenState *reopen_state);
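
/*
 * Example (sketch): switching an already open image from read-only to
 * read/write with bdrv_reopen().  This assumes the image was originally
 * opened with BDRV_O_ALLOW_RDWR; 'bs' and 'local_err' are illustrative.
 *
 *     Error *local_err = NULL;
 *     int new_flags = bdrv_get_flags(bs) | BDRV_O_RDWR;
 *
 *     if (bdrv_reopen(bs, new_flags, &local_err) < 0) {
 *         // reopen failed and 'bs' keeps its old flags; report local_err
 *     }
 */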
void bdrv_close(BlockDriverState *bs);
void bdrv_add_close_notifier(BlockDriverState *bs, Notifier *notify);
int bdrv_attach_dev(BlockDriverState *bs, void *dev);
void bdrv_attach_dev_nofail(BlockDriverState *bs, void *dev);
void bdrv_detach_dev(BlockDriverState *bs, void *dev);
void *bdrv_get_attached_dev(BlockDriverState *bs);
void bdrv_set_dev_ops(BlockDriverState *bs, const BlockDevOps *ops,
                      void *opaque);
void bdrv_dev_eject_request(BlockDriverState *bs, bool force);
bool bdrv_dev_has_removable_media(BlockDriverState *bs);
bool bdrv_dev_is_tray_open(BlockDriverState *bs);
bool bdrv_dev_is_medium_locked(BlockDriverState *bs);
int bdrv_read(BlockDriverState *bs, int64_t sector_num,
              uint8_t *buf, int nb_sectors);
int bdrv_read_unthrottled(BlockDriverState *bs, int64_t sector_num,
                          uint8_t *buf, int nb_sectors);
int bdrv_write(BlockDriverState *bs, int64_t sector_num,
               const uint8_t *buf, int nb_sectors);
int bdrv_write_zeroes(BlockDriverState *bs, int64_t sector_num,
               int nb_sectors);
int bdrv_writev(BlockDriverState *bs, int64_t sector_num, QEMUIOVector *qiov);
int bdrv_pread(BlockDriverState *bs, int64_t offset,
               void *buf, int count);
int bdrv_pwrite(BlockDriverState *bs, int64_t offset,
                const void *buf, int count);
int bdrv_pwritev(BlockDriverState *bs, int64_t offset, QEMUIOVector *qiov);
int bdrv_pwrite_sync(BlockDriverState *bs, int64_t offset,
    const void *buf, int count);
int coroutine_fn bdrv_co_readv(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_copy_on_readv(BlockDriverState *bs,
    int64_t sector_num, int nb_sectors, QEMUIOVector *qiov);
int coroutine_fn bdrv_co_writev(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors, QEMUIOVector *qiov);
/*
 * Efficiently zero a region of the disk image.  Note that this is a regular
 * I/O request like read or write and should have a reasonable size.  This
 * function is not suitable for zeroing the entire image in a single request
 * because it may allocate memory for the entire region.
 */
int coroutine_fn bdrv_co_write_zeroes(BlockDriverState *bs, int64_t sector_num,
    int nb_sectors);
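
/*
 * Example (sketch): because of the note above, zeroing a large range is
 * better done in bounded chunks.  This must run in coroutine context;
 * 'bs', 'sector_num', 'nb_sectors' and the chunk size are illustrative.
 *
 *     const int max_chunk = 16384;    // 8 MB worth of 512-byte sectors
 *
 *     while (nb_sectors > 0) {
 *         int n = MIN(nb_sectors, max_chunk);
 *         int ret = bdrv_co_write_zeroes(bs, sector_num, n);
 *         if (ret < 0) {
 *             return ret;
 *         }
 *         sector_num += n;
 *         nb_sectors -= n;
 *     }
 */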
BlockDriverState *bdrv_find_backing_image(BlockDriverState *bs,
    const char *backing_file);
int bdrv_get_backing_file_depth(BlockDriverState *bs);
int bdrv_truncate(BlockDriverState *bs, int64_t offset);
int64_t bdrv_getlength(BlockDriverState *bs);
int64_t bdrv_get_allocated_file_size(BlockDriverState *bs);
void bdrv_get_geometry(BlockDriverState *bs, uint64_t *nb_sectors_ptr);
int bdrv_commit(BlockDriverState *bs);
int bdrv_commit_all(void);
int bdrv_change_backing_file(BlockDriverState *bs,
    const char *backing_file, const char *backing_fmt);
void bdrv_register(BlockDriver *bdrv);
int bdrv_drop_intermediate(BlockDriverState *active, BlockDriverState *top,
                           BlockDriverState *base);
BlockDriverState *bdrv_find_overlay(BlockDriverState *active,
                                    BlockDriverState *bs);
BlockDriverState *bdrv_find_base(BlockDriverState *bs);


typedef struct BdrvCheckResult {
    int corruptions;
    int leaks;
    int check_errors;
    int corruptions_fixed;
    int leaks_fixed;
    int64_t image_end_offset;
    BlockFragInfo bfi;
} BdrvCheckResult;

typedef enum {
    BDRV_FIX_LEAKS    = 1,
    BDRV_FIX_ERRORS   = 2,
} BdrvCheckMode;

int bdrv_check(BlockDriverState *bs, BdrvCheckResult *res, BdrvCheckMode fix);

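/*
 * Example (sketch): running an image consistency check and repairing what
 * can be repaired, roughly what "qemu-img check -r all" does.  'bs' is an
 * illustrative, already-open BlockDriverState.
 *
 *     BdrvCheckResult result = {0};
 *     int ret = bdrv_check(bs, &result, BDRV_FIX_LEAKS | BDRV_FIX_ERRORS);
 *
 *     if (ret < 0) {
 *         // the check itself could not run (e.g. not supported)
 *     } else if (result.corruptions || result.check_errors) {
 *         // damage remains; corruptions_fixed/leaks_fixed report repairs
 *     }
 */
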
int bdrv_amend_options(BlockDriverState *bs_new, QEMUOptionParameter *options);

/* external snapshots */

typedef enum {
    EXT_SNAPSHOT_ALLOWED,
    EXT_SNAPSHOT_FORBIDDEN,
} ExtSnapshotPerm;

/* return EXT_SNAPSHOT_ALLOWED if external snapshot is allowed
 * return EXT_SNAPSHOT_FORBIDDEN if external snapshot is forbidden
 */
ExtSnapshotPerm bdrv_check_ext_snapshot(BlockDriverState *bs);
/* helper used to forbid external snapshots like in blkverify */
ExtSnapshotPerm bdrv_check_ext_snapshot_forbidden(BlockDriverState *bs);

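/*
 * Example (sketch): a caller that wants to layer an external snapshot on
 * top of 'bs' asks for permission first; drivers such as blkverify plug in
 * bdrv_check_ext_snapshot_forbidden() to refuse.  'bs' is illustrative.
 *
 *     if (bdrv_check_ext_snapshot(bs) != EXT_SNAPSHOT_ALLOWED) {
 *         // refuse the snapshot request
 *     }
 */
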
/* async block I/O */
typedef void BlockDriverDirtyHandler(BlockDriverState *bs, int64_t sector,
                                     int sector_num);
BlockDriverAIOCB *bdrv_aio_readv(BlockDriverState *bs, int64_t sector_num,
                                 QEMUIOVector *iov, int nb_sectors,
                                 BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *bdrv_aio_writev(BlockDriverState *bs, int64_t sector_num,
                                  QEMUIOVector *iov, int nb_sectors,
                                  BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *bdrv_aio_flush(BlockDriverState *bs,
                                 BlockDriverCompletionFunc *cb, void *opaque);
BlockDriverAIOCB *bdrv_aio_discard(BlockDriverState *bs,
                                   int64_t sector_num, int nb_sectors,
                                   BlockDriverCompletionFunc *cb, void *opaque);
void bdrv_aio_cancel(BlockDriverAIOCB *acb);

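/*
 * Example (sketch): submitting an asynchronous read.  The completion
 * callback runs later with ret < 0 on error.  my_read_done, MyState and
 * the locals are hypothetical; the callback signature (opaque, ret) comes
 * from the aio header included above.
 *
 *     static void my_read_done(void *opaque, int ret)
 *     {
 *         struct MyState *s = opaque;
 *         // consume the data in s->qiov, or handle ret < 0
 *     }
 *
 *     BlockDriverAIOCB *acb = bdrv_aio_readv(bs, sector_num, &s->qiov,
 *                                            nb_sectors, my_read_done, s);
 *     if (!acb) {
 *         // the request could not be submitted
 *     }
 */
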
typedef struct BlockRequest {
    /* Fields to be filled by multiwrite caller */
    int64_t sector;
    int nb_sectors;
    QEMUIOVector *qiov;
    BlockDriverCompletionFunc *cb;
    void *opaque;

    /* Filled by multiwrite implementation */
    int error;
} BlockRequest;

int bdrv_aio_multiwrite(BlockDriverState *bs, BlockRequest *reqs,
    int num_reqs);

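/*
 * Example (sketch): batching two writes through bdrv_aio_multiwrite().
 * Each request carries its own completion callback; reqs, qiov1/qiov2,
 * my_write_done and the opaque pointers are illustrative.
 *
 *     BlockRequest reqs[2] = {
 *         { .sector = 0,  .nb_sectors = 8, .qiov = &qiov1,
 *           .cb = my_write_done, .opaque = ctx1 },
 *         { .sector = 64, .nb_sectors = 8, .qiov = &qiov2,
 *           .cb = my_write_done, .opaque = ctx2 },
 *     };
 *
 *     if (bdrv_aio_multiwrite(bs, reqs, 2) < 0) {
 *         // at least one request was not submitted; check reqs[i].error,
 *         // callbacks only run for requests whose error field is 0
 *     }
 */
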
/* sg packet commands */
int bdrv_ioctl(BlockDriverState *bs, unsigned long int req, void *buf);
BlockDriverAIOCB *bdrv_aio_ioctl(BlockDriverState *bs,
        unsigned long int req, void *buf,
        BlockDriverCompletionFunc *cb, void *opaque);

/* Invalidate any cached metadata used by image formats */
void bdrv_invalidate_cache(BlockDriverState *bs);
void bdrv_invalidate_cache_all(void);

void bdrv_clear_incoming_migration_all(void);

/* Ensure contents are flushed to disk.  */
int bdrv_flush(BlockDriverState *bs);
int coroutine_fn bdrv_co_flush(BlockDriverState *bs);
int bdrv_flush_all(void);
void bdrv_close_all(void);
void bdrv_drain_all(void);

int bdrv_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
int bdrv_co_discard(BlockDriverState *bs, int64_t sector_num, int nb_sectors);
int bdrv_has_zero_init_1(BlockDriverState *bs);
int bdrv_has_zero_init(BlockDriverState *bs);
int64_t bdrv_get_block_status(BlockDriverState *bs, int64_t sector_num,
                              int nb_sectors, int *pnum);
int bdrv_is_allocated(BlockDriverState *bs, int64_t sector_num, int nb_sectors,
                      int *pnum);
int bdrv_is_allocated_above(BlockDriverState *top, BlockDriverState *base,
                            int64_t sector_num, int nb_sectors, int *pnum);

void bdrv_set_on_error(BlockDriverState *bs, BlockdevOnError on_read_error,
                       BlockdevOnError on_write_error);
BlockdevOnError bdrv_get_on_error(BlockDriverState *bs, bool is_read);
BlockErrorAction bdrv_get_error_action(BlockDriverState *bs, bool is_read, int error);
void bdrv_error_action(BlockDriverState *bs, BlockErrorAction action,
                       bool is_read, int error);
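
/*
 * Example (sketch): how a device model might map an I/O error onto the
 * configured rerror/werror policy.  'bs', 'is_read' and 'error' (a
 * positive errno, e.g. -ret of a failed request) are illustrative.
 *
 *     BlockErrorAction action = bdrv_get_error_action(bs, is_read, error);
 *
 *     if (action == BDRV_ACTION_STOP) {
 *         // keep the request around so it can be retried after resume
 *     }
 *     bdrv_error_action(bs, action, is_read, error);
 */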
int bdrv_is_read_only(BlockDriverState *bs);
int bdrv_is_sg(BlockDriverState *bs);
int bdrv_enable_write_cache(BlockDriverState *bs);
void bdrv_set_enable_write_cache(BlockDriverState *bs, bool wce);
int bdrv_is_inserted(BlockDriverState *bs);
int bdrv_media_changed(BlockDriverState *bs);
void bdrv_lock_medium(BlockDriverState *bs, bool locked);
void bdrv_eject(BlockDriverState *bs, bool eject_flag);
const char *bdrv_get_format_name(BlockDriverState *bs);
BlockDriverState *bdrv_find(const char *name);
BlockDriverState *bdrv_next(BlockDriverState *bs);
void bdrv_iterate(void (*it)(void *opaque, BlockDriverState *bs),
                  void *opaque);
int bdrv_is_encrypted(BlockDriverState *bs);
int bdrv_key_required(BlockDriverState *bs);
int bdrv_set_key(BlockDriverState *bs, const char *key);
int bdrv_query_missing_keys(void);
void bdrv_iterate_format(void (*it)(void *opaque, const char *name),
                         void *opaque);
const char *bdrv_get_device_name(BlockDriverState *bs);
int bdrv_get_flags(BlockDriverState *bs);
int bdrv_write_compressed(BlockDriverState *bs, int64_t sector_num,
                          const uint8_t *buf, int nb_sectors);
int bdrv_get_info(BlockDriverState *bs, BlockDriverInfo *bdi);
ImageInfoSpecific *bdrv_get_specific_info(BlockDriverState *bs);
void bdrv_round_to_clusters(BlockDriverState *bs,
                            int64_t sector_num, int nb_sectors,
                            int64_t *cluster_sector_num,
                            int *cluster_nb_sectors);

const char *bdrv_get_encrypted_filename(BlockDriverState *bs);
void bdrv_get_backing_filename(BlockDriverState *bs,
                               char *filename, int filename_size);
void bdrv_get_full_backing_filename(BlockDriverState *bs,
                                    char *dest, size_t sz);
int bdrv_is_snapshot(BlockDriverState *bs);

int path_is_absolute(const char *path);
void path_combine(char *dest, int dest_size,
                  const char *base_path,
                  const char *filename);

int bdrv_writev_vmstate(BlockDriverState *bs, QEMUIOVector *qiov, int64_t pos);
int bdrv_save_vmstate(BlockDriverState *bs, const uint8_t *buf,
                      int64_t pos, int size);

int bdrv_load_vmstate(BlockDriverState *bs, uint8_t *buf,
                      int64_t pos, int size);

void bdrv_img_create(const char *filename, const char *fmt,
                     const char *base_filename, const char *base_fmt,
                     char *options, uint64_t img_size, int flags,
                     Error **errp, bool quiet);

void bdrv_set_buffer_alignment(BlockDriverState *bs, int align);
void *qemu_blockalign(BlockDriverState *bs, size_t size);
bool bdrv_qiov_is_aligned(BlockDriverState *bs, QEMUIOVector *qiov);

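/*
 * Example (sketch): buffers handed to the block layer should be allocated
 * with qemu_blockalign() so they meet bs's alignment requirements (needed
 * for O_DIRECT-style backends).  'bs' and 'len' are illustrative;
 * qemu_vfree() is the matching release helper elsewhere in QEMU.
 *
 *     uint8_t *buf = qemu_blockalign(bs, len);
 *     memset(buf, 0, len);
 *     ...
 *     qemu_vfree(buf);
 */
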
struct HBitmapIter;
void bdrv_set_dirty_tracking(BlockDriverState *bs, int granularity);
int bdrv_get_dirty(BlockDriverState *bs, int64_t sector);
void bdrv_set_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
void bdrv_reset_dirty(BlockDriverState *bs, int64_t cur_sector, int nr_sectors);
void bdrv_dirty_iter_init(BlockDriverState *bs, struct HBitmapIter *hbi);
int64_t bdrv_get_dirty_count(BlockDriverState *bs);

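/*
 * Example (sketch): enabling dirty tracking and walking the dirty sectors,
 * roughly as a mirroring block job would.  'bs' is illustrative;
 * HBitmapIter and hbitmap_iter_next() are assumed to come from the hbitmap
 * header, and the granularity is in bytes.
 *
 *     struct HBitmapIter hbi;
 *     int64_t sector;
 *
 *     bdrv_set_dirty_tracking(bs, 65536);    // track in 64k chunks
 *     ...
 *     bdrv_dirty_iter_init(bs, &hbi);
 *     while ((sector = hbitmap_iter_next(&hbi)) >= 0) {
 *         // 'sector' is dirty; copy it out, then clear it
 *         bdrv_reset_dirty(bs, sector, 1);
 *     }
 */
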
void bdrv_enable_copy_on_read(BlockDriverState *bs);
void bdrv_disable_copy_on_read(BlockDriverState *bs);

void bdrv_ref(BlockDriverState *bs);
void bdrv_unref(BlockDriverState *bs);
void bdrv_set_in_use(BlockDriverState *bs, int in_use);
int bdrv_in_use(BlockDriverState *bs);

#ifdef CONFIG_LINUX_AIO
int raw_get_aio_fd(BlockDriverState *bs);
#else
static inline int raw_get_aio_fd(BlockDriverState *bs)
{
    return -ENOTSUP;
}
#endif

enum BlockAcctType {
    BDRV_ACCT_READ,
    BDRV_ACCT_WRITE,
    BDRV_ACCT_FLUSH,
    BDRV_MAX_IOTYPE,
};

typedef struct BlockAcctCookie {
    int64_t bytes;
    int64_t start_time_ns;
    enum BlockAcctType type;
} BlockAcctCookie;

void bdrv_acct_start(BlockDriverState *bs, BlockAcctCookie *cookie,
        int64_t bytes, enum BlockAcctType type);
void bdrv_acct_done(BlockDriverState *bs, BlockAcctCookie *cookie);

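/*
 * Example (sketch): a device model accounts a guest write by bracketing
 * the request with start/done.  The cookie typically lives in the
 * per-request state; 'req' and its fields are hypothetical.
 *
 *     bdrv_acct_start(bs, &req->acct, req->qiov->size, BDRV_ACCT_WRITE);
 *     // ... submit the write; then, in its completion path:
 *     bdrv_acct_done(bs, &req->acct);
 */
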
typedef enum {
    BLKDBG_L1_UPDATE,

    BLKDBG_L1_GROW_ALLOC_TABLE,
    BLKDBG_L1_GROW_WRITE_TABLE,
    BLKDBG_L1_GROW_ACTIVATE_TABLE,

    BLKDBG_L2_LOAD,
    BLKDBG_L2_UPDATE,
    BLKDBG_L2_UPDATE_COMPRESSED,
    BLKDBG_L2_ALLOC_COW_READ,
    BLKDBG_L2_ALLOC_WRITE,

    BLKDBG_READ_AIO,
    BLKDBG_READ_BACKING_AIO,
    BLKDBG_READ_COMPRESSED,

    BLKDBG_WRITE_AIO,
    BLKDBG_WRITE_COMPRESSED,

    BLKDBG_VMSTATE_LOAD,
    BLKDBG_VMSTATE_SAVE,

    BLKDBG_COW_READ,
    BLKDBG_COW_WRITE,

    BLKDBG_REFTABLE_LOAD,
    BLKDBG_REFTABLE_GROW,
    BLKDBG_REFTABLE_UPDATE,

    BLKDBG_REFBLOCK_LOAD,
    BLKDBG_REFBLOCK_UPDATE,
    BLKDBG_REFBLOCK_UPDATE_PART,
    BLKDBG_REFBLOCK_ALLOC,
    BLKDBG_REFBLOCK_ALLOC_HOOKUP,
    BLKDBG_REFBLOCK_ALLOC_WRITE,
    BLKDBG_REFBLOCK_ALLOC_WRITE_BLOCKS,
    BLKDBG_REFBLOCK_ALLOC_WRITE_TABLE,
    BLKDBG_REFBLOCK_ALLOC_SWITCH_TABLE,

    BLKDBG_CLUSTER_ALLOC,
    BLKDBG_CLUSTER_ALLOC_BYTES,
    BLKDBG_CLUSTER_FREE,

    BLKDBG_FLUSH_TO_OS,
    BLKDBG_FLUSH_TO_DISK,

    BLKDBG_EVENT_MAX,
} BlkDebugEvent;

#define BLKDBG_EVENT(bs, evt) bdrv_debug_event(bs, evt)
void bdrv_debug_event(BlockDriverState *bs, BlkDebugEvent event);

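/*
 * Example (sketch): image format drivers fire these events at interesting
 * points so the blkdebug driver can inject errors or suspend requests.
 * A qcow2-like driver might, just before updating an L2 table, do
 * something along these lines (offsets and buffers are hypothetical):
 *
 *     BLKDBG_EVENT(bs->file, BLKDBG_L2_UPDATE);
 *     ret = bdrv_pwrite_sync(bs->file, l2_offset, l2_table, l2_size);
 */
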
int bdrv_debug_breakpoint(BlockDriverState *bs, const char *event,
                           const char *tag);
int bdrv_debug_resume(BlockDriverState *bs, const char *tag);
bool bdrv_debug_is_suspended(BlockDriverState *bs, const char *tag);

#endif