qemu/block/qed.c
/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "qapi/error.h"
#include "qemu/timer.h"
#include "qemu/bswap.h"
#include "trace.h"
#include "qed.h"
#include "qapi/qmp/qerror.h"
#include "sysemu/block-backend.h"

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 *
 * No new allocating reqs can start while this function runs.
 */
static int coroutine_fn qed_write_header(BDRVQEDState *s)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = DIV_ROUND_UP(sizeof(QEDHeader), BDRV_SECTOR_SIZE);
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    uint8_t *buf;
    struct iovec iov;
    QEMUIOVector qiov;
    int ret;

    assert(s->allocating_acb || s->allocating_write_reqs_plugged);

    buf = qemu_blockalign(s->bs, len);
    iov = (struct iovec) {
        .iov_base = buf,
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = bdrv_co_preadv(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *) buf);

    ret = bdrv_co_pwritev(s->bs->file, 0, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0;
out:
    qemu_vfree(buf);
    return ret;
}

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
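
/* Worked example (a sketch, not part of the original source, assuming the QED
 * defaults of 64 KiB clusters and table_size = 4): each table then holds
 * (4 * 65536) / 8 = 32768 entries, one L2 table maps 32768 * 64 KiB = 2 GiB,
 * and a full L1 table covers 32768 * 2 GiB = 64 TiB of virtual disk.
 */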

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BdrvChild *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 *
 * Called with table_lock held.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 *
 * Called with table_lock held.
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static bool qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);

    /* No reentrancy is allowed.  */
    assert(!s->allocating_write_reqs_plugged);
    if (s->allocating_acb != NULL) {
        /* Another allocating write came concurrently.  This cannot happen
         * from bdrv_qed_co_drain, but it can happen when the timer runs.
         */
        qemu_co_mutex_unlock(&s->table_lock);
        return false;
    }

    s->allocating_write_reqs_plugged = true;
    qemu_co_mutex_unlock(&s->table_lock);
    return true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    qemu_co_mutex_lock(&s->table_lock);
    assert(s->allocating_write_reqs_plugged);
    s->allocating_write_reqs_plugged = false;
    qemu_co_queue_next(&s->allocating_write_reqs);
    qemu_co_mutex_unlock(&s->table_lock);
}

static void coroutine_fn qed_need_check_timer_entry(void *opaque)
{
    BDRVQEDState *s = opaque;
    int ret;

    trace_qed_need_check_timer_cb(s);

    if (!qed_plug_allocating_write_reqs(s)) {
        return;
    }

    /* Ensure writes are on disk before clearing flag */
    ret = bdrv_co_flush(s->bs->file->bs);
    if (ret < 0) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    ret = qed_write_header(s);
    (void) ret;

    qed_unplug_allocating_write_reqs(s);

    ret = bdrv_co_flush(s->bs);
    (void) ret;
}

static void qed_need_check_timer_cb(void *opaque)
{
    Coroutine *co = qemu_coroutine_create(qed_need_check_timer_entry, opaque);
    qemu_coroutine_enter(co);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use QEMU_CLOCK_VIRTUAL so we don't alter the image file while suspended
     * for migration.
     */
    timer_mod(s->need_check_timer, qemu_clock_get_ns(QEMU_CLOCK_VIRTUAL) +
                   NANOSECONDS_PER_SECOND * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    timer_del(s->need_check_timer);
}

static void bdrv_qed_detach_aio_context(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    timer_free(s->need_check_timer);
}

static void bdrv_qed_attach_aio_context(BlockDriverState *bs,
                                        AioContext *new_context)
{
    BDRVQEDState *s = bs->opaque;

    s->need_check_timer = aio_timer_new(new_context,
                                        QEMU_CLOCK_VIRTUAL, SCALE_NS,
                                        qed_need_check_timer_cb, s);
    if (s->header.features & QED_F_NEED_CHECK) {
        qed_start_need_check_timer(s);
    }
}

static void coroutine_fn bdrv_qed_co_drain(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    /* Fire the timer immediately in order to start doing I/O as soon as the
     * header is flushed.
     */
    if (s->need_check_timer && timer_pending(s->need_check_timer)) {
        qed_cancel_need_check_timer(s);
        qed_need_check_timer_entry(s);
    }
}

static void bdrv_qed_init_state(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    memset(s, 0, sizeof(BDRVQEDState));
    s->bs = bs;
    qemu_co_mutex_init(&s->table_lock);
    qemu_co_queue_init(&s->allocating_write_reqs);
}

static int bdrv_qed_do_open(BlockDriverState *bs, QDict *options, int flags,
                            Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        error_setg(errp, "Image not in QED format");
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        error_setg(errp, "Unsupported QED features: %" PRIx64,
                   s->header.features & ~QED_FEATURE_MASK);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file->bs);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ctz32(s->header.cluster_size);
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ctz32(s->table_nelems);
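
    /* Example (a sketch, assuming the defaults of 64 KiB clusters and
     * table_size = 4): table_nelems = 32768, so l2_shift = 16, l2_mask =
     * 0x7fff and l1_shift = 31.  A virtual disk position then decomposes
     * into L1 index = pos >> 31, L2 index = (pos >> 16) & 0x7fff, and
     * offset into the cluster = pos & 0xffff.
     */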

    /* Header size calculation must not overflow uint32_t */
    if (s->header.header_size > UINT32_MAX / s->header.cluster_size) {
        return -EINVAL;
    }

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file->bs) && !(flags & BDRV_O_INACTIVE)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file->bs);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file->bs) &&
            !(flags & BDRV_O_INACTIVE)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    bdrv_qed_attach_aio_context(bs, bdrv_get_aio_context(bs));

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}

static int bdrv_qed_open(BlockDriverState *bs, QDict *options, int flags,
                         Error **errp)
{
    bs->file = bdrv_open_child(NULL, options, "file", bs, &child_file,
                               false, errp);
    if (!bs->file) {
        return -EINVAL;
    }

    bdrv_qed_init_state(bs);
    return bdrv_qed_do_open(bs, options, flags, errp);
}

static void bdrv_qed_refresh_limits(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;

    bs->bl.pwrite_zeroes_alignment = s->header.cluster_size;
}

/* We have nothing to do for QED reopen; the stubs just return success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_detach_aio_context(bs);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file->bs);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt,
                      QemuOpts *opts, Error **errp)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    Error *local_err = NULL;
    int ret = 0;
    BlockBackend *blk;

    ret = bdrv_create_file(filename, opts, &local_err);
    if (ret < 0) {
        error_propagate(errp, local_err);
        return ret;
    }

    blk = blk_new_open(filename, NULL, NULL,
                       BDRV_O_RDWR | BDRV_O_RESIZE | BDRV_O_PROTOCOL,
                       &local_err);
    if (blk == NULL) {
        error_propagate(errp, local_err);
        return -EIO;
    }

    blk_set_allow_write_beyond_eof(blk, true);

    /* File must start empty and grow, check truncate is supported */
    ret = blk_truncate(blk, 0, PREALLOC_MODE_OFF, errp);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }
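
    /* Resulting file layout, summarizing the writes below: the header cluster
     * at offset 0 holds the QEDHeader immediately followed by the backing
     * filename string, and the zeroed L1 table occupies table_size clusters
     * starting at l1_table_offset == cluster_size.
     */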

    qed_header_cpu_to_le(&header, &le_header);
    ret = blk_pwrite(blk, 0, &le_header, sizeof(le_header), 0);
    if (ret < 0) {
        goto out;
    }
    ret = blk_pwrite(blk, sizeof(le_header), backing_file,
                     header.backing_filename_size, 0);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = blk_pwrite(blk, header.l1_table_offset, l1_table, l1_size, 0);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    blk_unref(blk);
    return ret;
}

static int bdrv_qed_create(const char *filename, QemuOpts *opts, Error **errp)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    char *backing_file = NULL;
    char *backing_fmt = NULL;
    int ret;

    image_size = ROUND_UP(qemu_opt_get_size_del(opts, BLOCK_OPT_SIZE, 0),
                          BDRV_SECTOR_SIZE);
    backing_file = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FILE);
    backing_fmt = qemu_opt_get_del(opts, BLOCK_OPT_BACKING_FMT);
    cluster_size = qemu_opt_get_size_del(opts,
                                         BLOCK_OPT_CLUSTER_SIZE,
                                         QED_DEFAULT_CLUSTER_SIZE);
    table_size = qemu_opt_get_size_del(opts, BLOCK_OPT_TABLE_SIZE,
                                       QED_DEFAULT_TABLE_SIZE);

    if (!qed_is_cluster_size_valid(cluster_size)) {
        error_setg(errp, "QED cluster size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_table_size_valid(table_size)) {
        error_setg(errp, "QED table size must be within range [%u, %u] "
                         "and power of 2",
                   QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        ret = -EINVAL;
        goto finish;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        error_setg(errp, "QED image size must be a non-zero multiple of "
                         "cluster size and less than %" PRIu64 " bytes",
                   qed_max_image_size(cluster_size, table_size));
        ret = -EINVAL;
        goto finish;
    }

    ret = qed_create(filename, cluster_size, image_size, table_size,
                     backing_file, backing_fmt, opts, errp);

finish:
    g_free(backing_file);
    g_free(backing_fmt);
    return ret;
}

typedef struct {
    BlockDriverState *bs;
    Coroutine *co;
    uint64_t pos;
    int64_t status;
    int *pnum;
    BlockDriverState **file;
} QEDIsAllocatedCB;

/* Called with table_lock held.  */
static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset,
                                size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    BDRVQEDState *s = cb->bs->opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    switch (ret) {
    case QED_CLUSTER_FOUND:
        offset |= qed_offset_into_cluster(s, cb->pos);
        cb->status = BDRV_BLOCK_DATA | BDRV_BLOCK_OFFSET_VALID | offset;
        *cb->file = cb->bs->file->bs;
        break;
    case QED_CLUSTER_ZERO:
        cb->status = BDRV_BLOCK_ZERO;
        break;
    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
        cb->status = 0;
        break;
    default:
        assert(ret < 0);
        cb->status = ret;
        break;
    }

    if (cb->co) {
        aio_co_wake(cb->co);
    }
}

static int64_t coroutine_fn bdrv_qed_co_get_block_status(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum,
                                                 BlockDriverState **file)
{
    BDRVQEDState *s = bs->opaque;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .bs = bs,
        .pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE,
        .status = BDRV_BLOCK_OFFSET_MASK,
        .pnum = pnum,
        .file = file,
    };
    QEDRequest request = { .l2_table = NULL };
    uint64_t offset;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    ret = qed_find_cluster(s, &request, cb.pos, &len, &offset);
    qed_is_allocated_cb(&cb, ret, offset, len);

    /* The callback was invoked immediately */
    assert(cb.status != BDRV_BLOCK_OFFSET_MASK);

    qed_unref_l2_cache_entry(request.l2_table);
    qemu_co_mutex_unlock(&s->table_lock);

    return cb.status;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:              QED state
 * @pos:            Byte position in device
 * @qiov:           Destination I/O vector
 * @backing_qiov:   Possibly shortened copy of qiov, to be allocated here
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static int coroutine_fn qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                              QEMUIOVector *qiov,
                                              QEMUIOVector **backing_qiov)
{
    uint64_t backing_length = 0;
    size_t size;
    int ret;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing) {
        int64_t l = bdrv_getlength(s->bs->backing->bs);
        if (l < 0) {
            return l;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        return 0;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    assert(*backing_qiov == NULL);
    *backing_qiov = g_new(QEMUIOVector, 1);
    qemu_iovec_init(*backing_qiov, qiov->niov);
    qemu_iovec_concat(*backing_qiov, qiov, 0, size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    ret = bdrv_co_preadv(s->bs->backing, pos, size, *backing_qiov, 0);
    if (ret < 0) {
        return ret;
    }
    return 0;
}
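
/* Straddle example (illustrative numbers only): for pos = 1 MiB,
 * qiov->size = 64 KiB and a backing file of 1 MiB + 4 KiB, the whole qiov is
 * zero-filled first and then only the first 4 KiB are read from the backing
 * file through the shortened backing_qiov.
 */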

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 */
static int coroutine_fn qed_copy_from_backing_file(BDRVQEDState *s,
                                                   uint64_t pos, uint64_t len,
                                                   uint64_t offset)
{
    QEMUIOVector qiov;
    QEMUIOVector *backing_qiov = NULL;
    struct iovec iov;
    int ret;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        return 0;
    }

    iov = (struct iovec) {
        .iov_base = qemu_blockalign(s->bs, len),
        .iov_len = len,
    };
    qemu_iovec_init_external(&qiov, &iov, 1);

    ret = qed_read_backing_file(s, pos, &qiov, &backing_qiov);

    if (backing_qiov) {
        qemu_iovec_destroy(backing_qiov);
        g_free(backing_qiov);
        backing_qiov = NULL;
    }

    if (ret) {
        goto out;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    ret = bdrv_co_pwritev(s->bs->file, offset, qiov.size, &qiov, 0);
    if (ret < 0) {
        goto out;
    }
    ret = 0;
out:
    qemu_vfree(iov.iov_base);
    return ret;
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 *
 * Called with table_lock held.
 */
static void coroutine_fn qed_update_l2_table(BDRVQEDState *s, QEDTable *table,
                                             int index, unsigned int n,
                                             uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
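
/* Example (illustrative offsets, 64 KiB clusters assumed): linking n = 3
 * clusters starting at file offset 0x30000 stores 0x30000, 0x40000 and
 * 0x50000 in consecutive L2 entries.  The zero and unallocated markers are
 * not file offsets, so they are stored unchanged in every entry.
 */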

/* Called with table_lock held.  */
static void coroutine_fn qed_aio_complete(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == s->allocating_acb) {
        s->allocating_acb = NULL;
        if (!qemu_co_queue_empty(&s->allocating_write_reqs)) {
            qemu_co_queue_next(&s->allocating_write_reqs);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Update L1 table with new L2 table offset and write it out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l1_update(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;
    int index, ret;

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = l2_table->offset;

    ret = qed_write_l1_table(s, index, 1);

    /* Commit the current L2 table to the cache */
    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    return ret;
}

/**
 * Update L2 table with new cluster offsets and write them out
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_l2_update(QEDAIOCB *acb, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index, ret;

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        ret = qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true);
        if (ret) {
            return ret;
        }
        return qed_aio_write_l1_update(acb);
    } else {
        /* Write out only the updated part of the L2 table */
        ret = qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters,
                                 false);
        if (ret) {
            return ret;
        }
    }
    return 0;
}

/**
 * Write data to the image file
 *
 * Called with table_lock *not* held.
 */
static int coroutine_fn qed_aio_write_main(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_main(s, acb, 0, offset, acb->cur_qiov.size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    return bdrv_co_pwritev(s->bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
}

/**
 * Populate untouched regions of new data cluster
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_cow(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start, len, offset;
    int ret;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Populate front untouched region of new data cluster */
    start = qed_start_of_cluster(s, acb->cur_pos);
    len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    ret = qed_copy_from_backing_file(s, start, len, acb->cur_cluster);
    if (ret < 0) {
        goto out;
    }

    /* Populate back untouched region of new data cluster */
    start = acb->cur_pos + acb->cur_qiov.size;
    len = qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    offset = acb->cur_cluster +
             qed_offset_into_cluster(s, acb->cur_pos) +
             acb->cur_qiov.size;

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    ret = qed_copy_from_backing_file(s, start, len, offset);
    if (ret < 0) {
        goto out;
    }

    ret = qed_aio_write_main(acb);
    if (ret < 0) {
        goto out;
    }

    if (s->bs->backing) {
        /*
         * Flush new data clusters before updating the L2 table
         *
         * This flush is necessary when a backing file is in use.  A crash
         * during an allocating write could result in empty clusters in the
         * image.  If the write only touched a subregion of the cluster,
         * then backing image sectors have been lost in the untouched
         * region.  The solution is to flush after writing a new data
         * cluster and before updating the L2 table.
         */
        ret = bdrv_co_flush(s->bs->file->bs);
    }

out:
    qemu_co_mutex_lock(&s->table_lock);
    return ret;
}
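
/* COW example (illustrative numbers, 64 KiB clusters assumed): a 4 KiB write
 * at position 0x12800 allocates the cluster covering [0x10000, 0x20000).
 * The prefill copies [0x10000, 0x12800) from the backing file and the
 * postfill copies [0x13800, 0x20000), so the new cluster is fully populated
 * before its L2 entry is linked.
 */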

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int ret;

    /* Cancel timer when the first allocating request comes in */
    if (s->allocating_acb == NULL) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (s->allocating_acb != acb || s->allocating_write_reqs_plugged) {
        if (s->allocating_acb != NULL) {
            qemu_co_queue_wait(&s->allocating_write_reqs, &s->table_lock);
            assert(s->allocating_acb == NULL);
        }
        s->allocating_acb = acb;
        return -EAGAIN; /* start over with looking up table entries */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            return 0;
        }
        acb->cur_cluster = 1;
    } else {
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        ret = qed_write_header(s);
        if (ret < 0) {
            return ret;
        }
    }

    if (!(acb->flags & QED_AIOCB_ZERO)) {
        ret = qed_aio_write_cow(acb);
        if (ret < 0) {
            return ret;
        }
    }

    return qed_aio_write_l2_update(acb, acb->cur_cluster);
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset,
                                              size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_try_blockalign(acb->bs, iov->iov_len);
            if (iov->iov_base == NULL) {
                r = -ENOMEM;
                goto out;
            }
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write.  */
    r = qed_aio_write_main(acb);
out:
    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_write_data(void *opaque, int ret,
                                           uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        return qed_aio_write_inplace(acb, offset, len);

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        return qed_aio_write_alloc(acb, len);

    default:
        g_assert_not_reached();
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2 or QED_CLUSTER_L1
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Called with table_lock held.
 */
static int coroutine_fn qed_aio_read_data(void *opaque, int ret,
                                          uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->bs;
    int r;

    qemu_co_mutex_unlock(&s->table_lock);

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads, otherwise read
     * data cluster directly.
     */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        r = 0;
    } else if (ret != QED_CLUSTER_FOUND) {
        r = qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                                  &acb->backing_qiov);
    } else {
        BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
        r = bdrv_co_preadv(bs->file, offset, acb->cur_qiov.size,
                           &acb->cur_qiov, 0);
    }

    qemu_co_mutex_lock(&s->table_lock);
    return r;
}

/**
 * Begin next I/O or complete the request
 */
static int coroutine_fn qed_aio_next_io(QEDAIOCB *acb)
{
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset;
    size_t len;
    int ret;

    qemu_co_mutex_lock(&s->table_lock);
    while (1) {
        trace_qed_aio_next_io(s, acb, 0, acb->cur_pos + acb->cur_qiov.size);

        if (acb->backing_qiov) {
            qemu_iovec_destroy(acb->backing_qiov);
            g_free(acb->backing_qiov);
            acb->backing_qiov = NULL;
        }

        acb->qiov_offset += acb->cur_qiov.size;
        acb->cur_pos += acb->cur_qiov.size;
        qemu_iovec_reset(&acb->cur_qiov);

        /* Complete request */
        if (acb->cur_pos >= acb->end_pos) {
            ret = 0;
            break;
        }

        /* Find next cluster and start I/O */
        len = acb->end_pos - acb->cur_pos;
        ret = qed_find_cluster(s, &acb->request, acb->cur_pos, &len, &offset);
        if (ret < 0) {
            break;
        }

        if (acb->flags & QED_AIOCB_WRITE) {
            ret = qed_aio_write_data(acb, ret, offset, len);
        } else {
            ret = qed_aio_read_data(acb, ret, offset, len);
        }

        if (ret < 0 && ret != -EAGAIN) {
            break;
        }
    }

    trace_qed_aio_complete(s, acb, ret);
    qed_aio_complete(acb);
    qemu_co_mutex_unlock(&s->table_lock);
    return ret;
}

static int coroutine_fn qed_co_request(BlockDriverState *bs, int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       int flags)
{
    QEDAIOCB acb = {
        .bs         = bs,
        .cur_pos    = (uint64_t) sector_num * BDRV_SECTOR_SIZE,
        .end_pos    = (sector_num + nb_sectors) * BDRV_SECTOR_SIZE,
        .qiov       = qiov,
        .flags      = flags,
    };
    qemu_iovec_init(&acb.cur_qiov, qiov->niov);

    trace_qed_aio_setup(bs->opaque, &acb, sector_num, nb_sectors, NULL, flags);

    /* Start request */
    return qed_aio_next_io(&acb);
}

static int coroutine_fn bdrv_qed_co_readv(BlockDriverState *bs,
                                          int64_t sector_num, int nb_sectors,
                                          QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, 0);
}

static int coroutine_fn bdrv_qed_co_writev(BlockDriverState *bs,
                                           int64_t sector_num, int nb_sectors,
                                           QEMUIOVector *qiov)
{
    return qed_co_request(bs, sector_num, qiov, nb_sectors, QED_AIOCB_WRITE);
}

static int coroutine_fn bdrv_qed_co_pwrite_zeroes(BlockDriverState *bs,
                                                  int64_t offset,
                                                  int bytes,
                                                  BdrvRequestFlags flags)
{
    BDRVQEDState *s = bs->opaque;
    QEMUIOVector qiov;
    struct iovec iov;

    /* Fall back if the request is not aligned */
    if (qed_offset_into_cluster(s, offset) ||
        qed_offset_into_cluster(s, bytes)) {
        return -ENOTSUP;
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len = bytes;

    qemu_iovec_init_external(&qiov, &iov, 1);
    return qed_co_request(bs, offset >> BDRV_SECTOR_BITS, &qiov,
                          bytes >> BDRV_SECTOR_BITS,
                          QED_AIOCB_WRITE | QED_AIOCB_ZERO);
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset,
                             PreallocMode prealloc, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (prealloc != PREALLOC_MODE_OFF) {
        error_setg(errp, "Unsupported preallocation mode '%s'",
                   PreallocMode_lookup[prealloc]);
        return -ENOTSUP;
    }

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        error_setg(errp, "Invalid image size specified");
        return -EINVAL;
    }

    if ((uint64_t)offset < s->header.image_size) {
        error_setg(errp, "Shrinking images is currently not supported");
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
        error_setg_errno(errp, -ret, "Failed to update the image size");
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    bdi->unallocated_blocks_are_zero = true;
    bdi->can_write_zeroes_with_unmap = true;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}

static void bdrv_qed_invalidate_cache(BlockDriverState *bs, Error **errp)
{
    BDRVQEDState *s = bs->opaque;
    Error *local_err = NULL;
    int ret;

    bdrv_qed_close(bs);

    bdrv_qed_init_state(bs);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_lock(&s->table_lock);
    }
    ret = bdrv_qed_do_open(bs, NULL, bs->open_flags, &local_err);
    if (qemu_in_coroutine()) {
        qemu_co_mutex_unlock(&s->table_lock);
    }
    if (local_err) {
        error_propagate(errp, local_err);
        error_prepend(errp, "Could not reopen qed layer: ");
        return;
    } else if (ret < 0) {
        error_setg_errno(errp, -ret, "Could not reopen qed layer");
        return;
    }
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QemuOptsList qed_create_opts = {
    .name = "qed-create-opts",
    .head = QTAILQ_HEAD_INITIALIZER(qed_create_opts.head),
    .desc = {
        {
            .name = BLOCK_OPT_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Virtual disk size"
        },
        {
            .name = BLOCK_OPT_BACKING_FILE,
            .type = QEMU_OPT_STRING,
            .help = "File name of a base image"
        },
        {
            .name = BLOCK_OPT_BACKING_FMT,
            .type = QEMU_OPT_STRING,
            .help = "Image format of the base image"
        },
        {
            .name = BLOCK_OPT_CLUSTER_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "Cluster size (in bytes)",
            .def_value_str = stringify(QED_DEFAULT_CLUSTER_SIZE)
        },
        {
            .name = BLOCK_OPT_TABLE_SIZE,
            .type = QEMU_OPT_SIZE,
            .help = "L1/L2 table size (in clusters)"
        },
        { /* end of list */ }
    }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_opts              = &qed_create_opts,
    .supports_backing         = true,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_child_perm          = bdrv_format_default_perms,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_has_zero_init       = bdrv_has_zero_init_1,
    .bdrv_co_get_block_status = bdrv_qed_co_get_block_status,
    .bdrv_co_readv            = bdrv_qed_co_readv,
    .bdrv_co_writev           = bdrv_qed_co_writev,
    .bdrv_co_pwrite_zeroes    = bdrv_qed_co_pwrite_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_refresh_limits      = bdrv_qed_refresh_limits,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
    .bdrv_detach_aio_context  = bdrv_qed_detach_aio_context,
    .bdrv_attach_aio_context  = bdrv_qed_attach_aio_context,
    .bdrv_co_drain            = bdrv_qed_co_drain,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);