/*
 * QEMU Enhanced Disk Format
 *
 * Copyright IBM, Corp. 2010
 *
 * Authors:
 *  Stefan Hajnoczi   <stefanha@linux.vnet.ibm.com>
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu-timer.h"
#include "trace.h"
#include "qed.h"
#include "qerror.h"
#include "migration.h"

static void qed_aio_cancel(BlockDriverAIOCB *blockacb)
{
    QEDAIOCB *acb = (QEDAIOCB *)blockacb;
    bool finished = false;

    /* Wait for the request to finish */
    acb->finished = &finished;
    while (!finished) {
        qemu_aio_wait();
    }
}

static const AIOCBInfo qed_aiocb_info = {
    .aiocb_size         = sizeof(QEDAIOCB),
    .cancel             = qed_aio_cancel,
};

static int bdrv_qed_probe(const uint8_t *buf, int buf_size,
                          const char *filename)
{
    const QEDHeader *header = (const QEDHeader *)buf;

    if (buf_size < sizeof(*header)) {
        return 0;
    }
    if (le32_to_cpu(header->magic) != QED_MAGIC) {
        return 0;
    }
    return 100;
}
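
/* Editor's note: probe scores are compared across all drivers when no format
 * is specified explicitly, so returning 100 on a magic match outranks low
 * fallback scores (the raw driver, for instance, returns a token score of 1
 * for any file).  QED_MAGIC is the little-endian encoding of the bytes
 * 'Q' 'E' 'D' '\0', so in terms of the raw buffer the check above is simply:
 *
 *   buf[0..3] == { 'Q', 'E', 'D', 0x00 }
 */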

/**
 * Check whether an image format is raw
 *
 * @fmt:    Backing file format, may be NULL
 */
static bool qed_fmt_is_raw(const char *fmt)
{
    return fmt && strcmp(fmt, "raw") == 0;
}

static void qed_header_le_to_cpu(const QEDHeader *le, QEDHeader *cpu)
{
    cpu->magic = le32_to_cpu(le->magic);
    cpu->cluster_size = le32_to_cpu(le->cluster_size);
    cpu->table_size = le32_to_cpu(le->table_size);
    cpu->header_size = le32_to_cpu(le->header_size);
    cpu->features = le64_to_cpu(le->features);
    cpu->compat_features = le64_to_cpu(le->compat_features);
    cpu->autoclear_features = le64_to_cpu(le->autoclear_features);
    cpu->l1_table_offset = le64_to_cpu(le->l1_table_offset);
    cpu->image_size = le64_to_cpu(le->image_size);
    cpu->backing_filename_offset = le32_to_cpu(le->backing_filename_offset);
    cpu->backing_filename_size = le32_to_cpu(le->backing_filename_size);
}

static void qed_header_cpu_to_le(const QEDHeader *cpu, QEDHeader *le)
{
    le->magic = cpu_to_le32(cpu->magic);
    le->cluster_size = cpu_to_le32(cpu->cluster_size);
    le->table_size = cpu_to_le32(cpu->table_size);
    le->header_size = cpu_to_le32(cpu->header_size);
    le->features = cpu_to_le64(cpu->features);
    le->compat_features = cpu_to_le64(cpu->compat_features);
    le->autoclear_features = cpu_to_le64(cpu->autoclear_features);
    le->l1_table_offset = cpu_to_le64(cpu->l1_table_offset);
    le->image_size = cpu_to_le64(cpu->image_size);
    le->backing_filename_offset = cpu_to_le32(cpu->backing_filename_offset);
    le->backing_filename_size = cpu_to_le32(cpu->backing_filename_size);
}

int qed_write_header_sync(BDRVQEDState *s)
{
    QEDHeader le;
    int ret;

    qed_header_cpu_to_le(&s->header, &le);
    ret = bdrv_pwrite(s->bs->file, 0, &le, sizeof(le));
    if (ret != sizeof(le)) {
        return ret;
    }
    return 0;
}

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    struct iovec iov;
    QEMUIOVector qiov;
    int nsectors;
    uint8_t *buf;
} QEDWriteHeaderCB;

static void qed_write_header_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;

    qemu_vfree(write_header_cb->buf);
    gencb_complete(write_header_cb, ret);
}

static void qed_write_header_read_cb(void *opaque, int ret)
{
    QEDWriteHeaderCB *write_header_cb = opaque;
    BDRVQEDState *s = write_header_cb->s;

    if (ret) {
        qed_write_header_cb(write_header_cb, ret);
        return;
    }

    /* Update header */
    qed_header_cpu_to_le(&s->header, (QEDHeader *)write_header_cb->buf);

    bdrv_aio_writev(s->bs->file, 0, &write_header_cb->qiov,
                    write_header_cb->nsectors, qed_write_header_cb,
                    write_header_cb);
}

/**
 * Update header in-place (does not rewrite backing filename or other strings)
 *
 * This function only updates known header fields in-place and does not affect
 * extra data after the QED header.
 */
static void qed_write_header(BDRVQEDState *s, BlockDriverCompletionFunc cb,
                             void *opaque)
{
    /* We must write full sectors for O_DIRECT but cannot necessarily generate
     * the data following the header if an unrecognized compat feature is
     * active.  Therefore, first read the sectors containing the header, update
     * them, and write back.
     */

    int nsectors = (sizeof(QEDHeader) + BDRV_SECTOR_SIZE - 1) /
                   BDRV_SECTOR_SIZE;
    size_t len = nsectors * BDRV_SECTOR_SIZE;
    QEDWriteHeaderCB *write_header_cb = gencb_alloc(sizeof(*write_header_cb),
                                                    cb, opaque);

    write_header_cb->s = s;
    write_header_cb->nsectors = nsectors;
    write_header_cb->buf = qemu_blockalign(s->bs, len);
    write_header_cb->iov.iov_base = write_header_cb->buf;
    write_header_cb->iov.iov_len = len;
    qemu_iovec_init_external(&write_header_cb->qiov, &write_header_cb->iov, 1);

    bdrv_aio_readv(s->bs->file, 0, &write_header_cb->qiov, nsectors,
                   qed_write_header_read_cb, write_header_cb);
}
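
/* Editor's note: a worked size calculation for the read-modify-write above,
 * assuming no compiler padding in QEDHeader (all fields naturally aligned):
 * sizeof(QEDHeader) is 64 bytes, so with 512-byte sectors:
 *
 *   nsectors = (64 + 511) / 512 = 1
 *   len      = 1 * 512          = 512 bytes
 *
 * i.e. the driver reads one sector, patches the first 64 bytes, and writes
 * the sector back, preserving whatever follows the header in that sector.
 */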

static uint64_t qed_max_image_size(uint32_t cluster_size, uint32_t table_size)
{
    uint64_t table_entries;
    uint64_t l2_size;

    table_entries = (table_size * cluster_size) / sizeof(uint64_t);
    l2_size = table_entries * cluster_size;

    return l2_size * table_entries;
}
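
/* Editor's note: a worked example with the defaults from qed.h
 * (cluster_size = 64 KB, table_size = 4 clusters):
 *
 *   table_entries = (4 * 65536) / sizeof(uint64_t) = 32768
 *   l2_size       = 32768 * 65536                  = 2 GB per L2 table
 *   max image     = 2 GB * 32768 L1 entries        = 64 TB
 */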

static bool qed_is_cluster_size_valid(uint32_t cluster_size)
{
    if (cluster_size < QED_MIN_CLUSTER_SIZE ||
        cluster_size > QED_MAX_CLUSTER_SIZE) {
        return false;
    }
    if (cluster_size & (cluster_size - 1)) {
        return false; /* not a power of 2 */
    }
    return true;
}
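
/* Editor's note: the (x & (x - 1)) test clears the lowest set bit, so it
 * yields zero exactly for powers of two (and zero itself).  For example,
 * 65536 (0x10000) & 65535 (0x0ffff) == 0, but 65537 & 65536 != 0.
 */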

static bool qed_is_table_size_valid(uint32_t table_size)
{
    if (table_size < QED_MIN_TABLE_SIZE ||
        table_size > QED_MAX_TABLE_SIZE) {
        return false;
    }
    if (table_size & (table_size - 1)) {
        return false; /* not a power of 2 */
    }
    return true;
}

static bool qed_is_image_size_valid(uint64_t image_size, uint32_t cluster_size,
                                    uint32_t table_size)
{
    if (image_size % BDRV_SECTOR_SIZE != 0) {
        return false; /* not a multiple of sector size */
    }
    if (image_size > qed_max_image_size(cluster_size, table_size)) {
        return false; /* image is too large */
    }
    return true;
}

/**
 * Read a string of known length from the image file
 *
 * @file:       Image file
 * @offset:     File offset to start of string, in bytes
 * @n:          String length in bytes
 * @buf:        Destination buffer
 * @buflen:     Destination buffer length in bytes
 * @ret:        0 on success, -errno on failure
 *
 * The string is NUL-terminated.
 */
static int qed_read_string(BlockDriverState *file, uint64_t offset, size_t n,
                           char *buf, size_t buflen)
{
    int ret;
    if (n >= buflen) {
        return -EINVAL;
    }
    ret = bdrv_pread(file, offset, buf, n);
    if (ret < 0) {
        return ret;
    }
    buf[n] = '\0';
    return 0;
}

/**
 * Allocate new clusters
 *
 * @s:          QED state
 * @n:          Number of contiguous clusters to allocate
 * @ret:        Offset of first allocated cluster
 *
 * This function only produces the offset where the new clusters should be
 * written.  It updates BDRVQEDState but does not make any changes to the image
 * file.
 */
static uint64_t qed_alloc_clusters(BDRVQEDState *s, unsigned int n)
{
    uint64_t offset = s->file_size;
    s->file_size += n * s->header.cluster_size;
    return offset;
}

QEDTable *qed_alloc_table(BDRVQEDState *s)
{
    /* Honor O_DIRECT memory alignment requirements */
    return qemu_blockalign(s->bs,
                           s->header.cluster_size * s->header.table_size);
}

/**
 * Allocate a new zeroed L2 table
 */
static CachedL2Table *qed_new_l2_table(BDRVQEDState *s)
{
    CachedL2Table *l2_table = qed_alloc_l2_cache_entry(&s->l2_cache);

    l2_table->table = qed_alloc_table(s);
    l2_table->offset = qed_alloc_clusters(s, s->header.table_size);

    memset(l2_table->table->offsets, 0,
           s->header.cluster_size * s->header.table_size);
    return l2_table;
}

static void qed_aio_next_io(void *opaque, int ret);

static void qed_plug_allocating_write_reqs(BDRVQEDState *s)
{
    assert(!s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = true;
}

static void qed_unplug_allocating_write_reqs(BDRVQEDState *s)
{
    QEDAIOCB *acb;

    assert(s->allocating_write_reqs_plugged);

    s->allocating_write_reqs_plugged = false;

    acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
    if (acb) {
        qed_aio_next_io(acb, 0);
    }
}

static void qed_finish_clear_need_check(void *opaque, int ret)
{
    /* Do nothing */
}

static void qed_flush_after_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    bdrv_aio_flush(s->bs, qed_finish_clear_need_check, s);

    /* No need to wait until flush completes */
    qed_unplug_allocating_write_reqs(s);
}

static void qed_clear_need_check(void *opaque, int ret)
{
    BDRVQEDState *s = opaque;

    if (ret) {
        qed_unplug_allocating_write_reqs(s);
        return;
    }

    s->header.features &= ~QED_F_NEED_CHECK;
    qed_write_header(s, qed_flush_after_clear_need_check, s);
}

static void qed_need_check_timer_cb(void *opaque)
{
    BDRVQEDState *s = opaque;

    /* The timer should only fire when allocating writes have drained */
    assert(!QSIMPLEQ_FIRST(&s->allocating_write_reqs));

    trace_qed_need_check_timer_cb(s);

    qed_plug_allocating_write_reqs(s);

    /* Ensure writes are on disk before clearing flag */
    bdrv_aio_flush(s->bs, qed_clear_need_check, s);
}

static void qed_start_need_check_timer(BDRVQEDState *s)
{
    trace_qed_start_need_check_timer(s);

    /* Use vm_clock so we don't alter the image file while suspended for
     * migration.
     */
    qemu_mod_timer(s->need_check_timer, qemu_get_clock_ns(vm_clock) +
                   get_ticks_per_sec() * QED_NEED_CHECK_TIMEOUT);
}

/* It's okay to call this multiple times or when no timer is started */
static void qed_cancel_need_check_timer(BDRVQEDState *s)
{
    trace_qed_cancel_need_check_timer(s);
    qemu_del_timer(s->need_check_timer);
}
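
/* Editor's note: the callbacks above implement lazy clearing of the
 * QED_F_NEED_CHECK dirty flag.  The sequence, pieced together from this file:
 *
 *   1. The first allocating write sets QED_F_NEED_CHECK and rewrites the
 *      header (qed_aio_write_alloc).
 *   2. When the last queued allocating write completes, the timer is armed
 *      for QED_NEED_CHECK_TIMEOUT seconds (qed_aio_complete); a new
 *      allocating write cancels it again.
 *   3. If the timer fires, allocating writes are plugged, the image is
 *      flushed, the flag is cleared and the header rewritten, a second
 *      flush is issued, and writes are unplugged without waiting for it.
 *
 * A crash before step 3 merely leaves the dirty flag set, forcing a
 * consistency check on the next open.
 */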

static void bdrv_qed_rebind(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    s->bs = bs;
}

static int bdrv_qed_open(BlockDriverState *bs, int flags)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader le_header;
    int64_t file_size;
    int ret;

    s->bs = bs;
    QSIMPLEQ_INIT(&s->allocating_write_reqs);

    ret = bdrv_pread(bs->file, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        return ret;
    }
    qed_header_le_to_cpu(&le_header, &s->header);

    if (s->header.magic != QED_MAGIC) {
        return -EINVAL;
    }
    if (s->header.features & ~QED_FEATURE_MASK) {
        /* image uses unsupported feature bits */
        char buf[64];
        snprintf(buf, sizeof(buf), "%" PRIx64,
            s->header.features & ~QED_FEATURE_MASK);
        qerror_report(QERR_UNKNOWN_BLOCK_FORMAT_FEATURE,
            bs->device_name, "QED", buf);
        return -ENOTSUP;
    }
    if (!qed_is_cluster_size_valid(s->header.cluster_size)) {
        return -EINVAL;
    }

    /* Round down file size to the last cluster */
    file_size = bdrv_getlength(bs->file);
    if (file_size < 0) {
        return file_size;
    }
    s->file_size = qed_start_of_cluster(s, file_size);

    if (!qed_is_table_size_valid(s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(s->header.image_size,
                                 s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }
    if (!qed_check_table_offset(s, s->header.l1_table_offset)) {
        return -EINVAL;
    }

    s->table_nelems = (s->header.cluster_size * s->header.table_size) /
                      sizeof(uint64_t);
    s->l2_shift = ffs(s->header.cluster_size) - 1;
    s->l2_mask = s->table_nelems - 1;
    s->l1_shift = s->l2_shift + ffs(s->table_nelems) - 1;

    if ((s->header.features & QED_F_BACKING_FILE)) {
        if ((uint64_t)s->header.backing_filename_offset +
            s->header.backing_filename_size >
            s->header.cluster_size * s->header.header_size) {
            return -EINVAL;
        }

        ret = qed_read_string(bs->file, s->header.backing_filename_offset,
                              s->header.backing_filename_size, bs->backing_file,
                              sizeof(bs->backing_file));
        if (ret < 0) {
            return ret;
        }

        if (s->header.features & QED_F_BACKING_FORMAT_NO_PROBE) {
            pstrcpy(bs->backing_format, sizeof(bs->backing_format), "raw");
        }
    }

    /* Reset unknown autoclear feature bits.  This is a backwards
     * compatibility mechanism that allows images to be opened by older
     * programs, which "knock out" unknown feature bits.  When an image is
     * opened by a newer program again it can detect that the autoclear
     * feature is no longer valid.
     */
    if ((s->header.autoclear_features & ~QED_AUTOCLEAR_FEATURE_MASK) != 0 &&
        !bdrv_is_read_only(bs->file) && !(flags & BDRV_O_INCOMING)) {
        s->header.autoclear_features &= QED_AUTOCLEAR_FEATURE_MASK;

        ret = qed_write_header_sync(s);
        if (ret) {
            return ret;
        }

        /* From here on only known autoclear feature bits are valid */
        bdrv_flush(bs->file);
    }

    s->l1_table = qed_alloc_table(s);
    qed_init_l2_cache(&s->l2_cache);

    ret = qed_read_l1_table_sync(s);
    if (ret) {
        goto out;
    }

    /* If image was not closed cleanly, check consistency */
    if (!(flags & BDRV_O_CHECK) && (s->header.features & QED_F_NEED_CHECK)) {
        /* Read-only images cannot be fixed.  There is no risk of corruption
         * since write operations are not possible.  Therefore, allow
         * potentially inconsistent images to be opened read-only.  This can
         * aid data recovery from an otherwise inconsistent image.
         */
        if (!bdrv_is_read_only(bs->file) &&
            !(flags & BDRV_O_INCOMING)) {
            BdrvCheckResult result = {0};

            ret = qed_check(s, &result, true);
            if (ret) {
                goto out;
            }
        }
    }

    s->need_check_timer = qemu_new_timer_ns(vm_clock,
                                            qed_need_check_timer_cb, s);

out:
    if (ret) {
        qed_free_l2_cache(&s->l2_cache);
        qemu_vfree(s->l1_table);
    }
    return ret;
}
/* QED has nothing to do on reopen; the stub just reports success */
static int bdrv_qed_reopen_prepare(BDRVReopenState *state,
                                   BlockReopenQueue *queue, Error **errp)
{
    return 0;
}

static void bdrv_qed_close(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    qed_cancel_need_check_timer(s);
    qemu_free_timer(s->need_check_timer);

    /* Ensure writes reach stable storage */
    bdrv_flush(bs->file);

    /* Clean shutdown, no check required on next open */
    if (s->header.features & QED_F_NEED_CHECK) {
        s->header.features &= ~QED_F_NEED_CHECK;
        qed_write_header_sync(s);
    }

    qed_free_l2_cache(&s->l2_cache);
    qemu_vfree(s->l1_table);
}

static int qed_create(const char *filename, uint32_t cluster_size,
                      uint64_t image_size, uint32_t table_size,
                      const char *backing_file, const char *backing_fmt)
{
    QEDHeader header = {
        .magic = QED_MAGIC,
        .cluster_size = cluster_size,
        .table_size = table_size,
        .header_size = 1,
        .features = 0,
        .compat_features = 0,
        .l1_table_offset = cluster_size,
        .image_size = image_size,
    };
    QEDHeader le_header;
    uint8_t *l1_table = NULL;
    size_t l1_size = header.cluster_size * header.table_size;
    int ret = 0;
    BlockDriverState *bs = NULL;

    ret = bdrv_create_file(filename, NULL);
    if (ret < 0) {
        return ret;
    }

    ret = bdrv_file_open(&bs, filename, BDRV_O_RDWR | BDRV_O_CACHE_WB);
    if (ret < 0) {
        return ret;
    }

    /* File must start empty and grow, check truncate is supported */
    ret = bdrv_truncate(bs, 0);
    if (ret < 0) {
        goto out;
    }

    if (backing_file) {
        header.features |= QED_F_BACKING_FILE;
        header.backing_filename_offset = sizeof(le_header);
        header.backing_filename_size = strlen(backing_file);

        if (qed_fmt_is_raw(backing_fmt)) {
            header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    qed_header_cpu_to_le(&header, &le_header);
    ret = bdrv_pwrite(bs, 0, &le_header, sizeof(le_header));
    if (ret < 0) {
        goto out;
    }
    ret = bdrv_pwrite(bs, sizeof(le_header), backing_file,
                      header.backing_filename_size);
    if (ret < 0) {
        goto out;
    }

    l1_table = g_malloc0(l1_size);
    ret = bdrv_pwrite(bs, header.l1_table_offset, l1_table, l1_size);
    if (ret < 0) {
        goto out;
    }

    ret = 0; /* success */
out:
    g_free(l1_table);
    bdrv_delete(bs);
    return ret;
}
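
/* Editor's note: the on-disk layout produced by qed_create(), as implied by
 * the three writes above:
 *
 *   offset 0                  QEDHeader (little-endian, header_size = 1
 *                             cluster)
 *   offset sizeof(le_header)  backing filename (optional, not NUL-terminated)
 *   offset cluster_size       zeroed L1 table, table_size clusters long
 *
 * L2 tables and data clusters are then allocated past the L1 table on demand.
 */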

static int bdrv_qed_create(const char *filename, QEMUOptionParameter *options)
{
    uint64_t image_size = 0;
    uint32_t cluster_size = QED_DEFAULT_CLUSTER_SIZE;
    uint32_t table_size = QED_DEFAULT_TABLE_SIZE;
    const char *backing_file = NULL;
    const char *backing_fmt = NULL;

    while (options && options->name) {
        if (!strcmp(options->name, BLOCK_OPT_SIZE)) {
            image_size = options->value.n;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FILE)) {
            backing_file = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_BACKING_FMT)) {
            backing_fmt = options->value.s;
        } else if (!strcmp(options->name, BLOCK_OPT_CLUSTER_SIZE)) {
            if (options->value.n) {
                cluster_size = options->value.n;
            }
        } else if (!strcmp(options->name, BLOCK_OPT_TABLE_SIZE)) {
            if (options->value.n) {
                table_size = options->value.n;
            }
        }
        options++;
    }

    if (!qed_is_cluster_size_valid(cluster_size)) {
        fprintf(stderr, "QED cluster size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_CLUSTER_SIZE, QED_MAX_CLUSTER_SIZE);
        return -EINVAL;
    }
    if (!qed_is_table_size_valid(table_size)) {
        fprintf(stderr, "QED table size must be within range [%u, %u] "
                        "and a power of 2\n",
                QED_MIN_TABLE_SIZE, QED_MAX_TABLE_SIZE);
        return -EINVAL;
    }
    if (!qed_is_image_size_valid(image_size, cluster_size, table_size)) {
        fprintf(stderr, "QED image size must be a non-zero multiple of "
                        "cluster size and less than %" PRIu64 " bytes\n",
                qed_max_image_size(cluster_size, table_size));
        return -EINVAL;
    }

    return qed_create(filename, cluster_size, image_size, table_size,
                      backing_file, backing_fmt);
}
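
/* Editor's note: these option names map directly to qemu-img's -o switches.
 * A hypothetical invocation exercising all of them (the image names are
 * placeholders):
 *
 *   qemu-img create -f qed \
 *       -o cluster_size=65536,table_size=4,backing_file=base.raw,backing_fmt=raw \
 *       overlay.qed 16G
 */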

typedef struct {
    Coroutine *co;
    int is_allocated;
    int *pnum;
} QEDIsAllocatedCB;

static void qed_is_allocated_cb(void *opaque, int ret, uint64_t offset,
                                size_t len)
{
    QEDIsAllocatedCB *cb = opaque;
    *cb->pnum = len / BDRV_SECTOR_SIZE;
    cb->is_allocated = (ret == QED_CLUSTER_FOUND || ret == QED_CLUSTER_ZERO);
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_is_allocated(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors, int *pnum)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    size_t len = (size_t)nb_sectors * BDRV_SECTOR_SIZE;
    QEDIsAllocatedCB cb = {
        .is_allocated = -1,
        .pnum = pnum,
    };
    QEDRequest request = { .l2_table = NULL };

    qed_find_cluster(s, &request, pos, len, qed_is_allocated_cb, &cb);

    /* Now sleep if the callback wasn't invoked immediately */
    while (cb.is_allocated == -1) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }

    qed_unref_l2_cache_entry(request.l2_table);

    return cb.is_allocated;
}

static int bdrv_qed_make_empty(BlockDriverState *bs)
{
    return -ENOTSUP;
}

static BDRVQEDState *acb_to_s(QEDAIOCB *acb)
{
    return acb->common.bs->opaque;
}

/**
 * Read from the backing file or zero-fill if no backing file
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @qiov:       Destination I/O vector
 * @cb:         Completion function
 * @opaque:     User data for completion function
 *
 * This function reads qiov->size bytes starting at pos from the backing file.
 * If there is no backing file then zeroes are read.
 */
static void qed_read_backing_file(BDRVQEDState *s, uint64_t pos,
                                  QEMUIOVector *qiov,
                                  BlockDriverCompletionFunc *cb, void *opaque)
{
    uint64_t backing_length = 0;
    size_t size;

    /* If there is a backing file, get its length.  Treat the absence of a
     * backing file like a zero length backing file.
     */
    if (s->bs->backing_hd) {
        int64_t l = bdrv_getlength(s->bs->backing_hd);
        if (l < 0) {
            cb(opaque, l);
            return;
        }
        backing_length = l;
    }

    /* Zero all sectors if reading beyond the end of the backing file */
    if (pos >= backing_length ||
        pos + qiov->size > backing_length) {
        qemu_iovec_memset(qiov, 0, 0, qiov->size);
    }

    /* Complete now if there are no backing file sectors to read */
    if (pos >= backing_length) {
        cb(opaque, 0);
        return;
    }

    /* If the read straddles the end of the backing file, shorten it */
    size = MIN((uint64_t)backing_length - pos, qiov->size);

    BLKDBG_EVENT(s->bs->file, BLKDBG_READ_BACKING_AIO);
    bdrv_aio_readv(s->bs->backing_hd, pos / BDRV_SECTOR_SIZE,
                   qiov, size / BDRV_SECTOR_SIZE, cb, opaque);
}
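
/* Editor's note: a hypothetical example of the three cases above, with a
 * 1024 KB backing file and a 64 KB qiov:
 *
 *   pos = 2048 KB: entirely past EOF.  The qiov is zero-filled and the
 *       request completes immediately without touching the backing file.
 *   pos = 1020 KB: straddles EOF.  The whole qiov is zero-filled first,
 *       then only the remaining 4 KB are read from the backing file.
 *   pos = 0:       entirely inside.  All 64 KB are read, no zero-fill.
 */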

typedef struct {
    GenericCB gencb;
    BDRVQEDState *s;
    QEMUIOVector qiov;
    struct iovec iov;
    uint64_t offset;
} CopyFromBackingFileCB;

static void qed_copy_from_backing_file_cb(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    qemu_vfree(copy_cb->iov.iov_base);
    gencb_complete(&copy_cb->gencb, ret);
}

static void qed_copy_from_backing_file_write(void *opaque, int ret)
{
    CopyFromBackingFileCB *copy_cb = opaque;
    BDRVQEDState *s = copy_cb->s;

    if (ret) {
        qed_copy_from_backing_file_cb(copy_cb, ret);
        return;
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_COW_WRITE);
    bdrv_aio_writev(s->bs->file, copy_cb->offset / BDRV_SECTOR_SIZE,
                    &copy_cb->qiov, copy_cb->qiov.size / BDRV_SECTOR_SIZE,
                    qed_copy_from_backing_file_cb, copy_cb);
}

/**
 * Copy data from backing file into the image
 *
 * @s:          QED state
 * @pos:        Byte position in device
 * @len:        Number of bytes
 * @offset:     Byte offset in image file
 * @cb:         Completion function
 * @opaque:     User data for completion function
 */
static void qed_copy_from_backing_file(BDRVQEDState *s, uint64_t pos,
                                       uint64_t len, uint64_t offset,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque)
{
    CopyFromBackingFileCB *copy_cb;

    /* Skip copy entirely if there is no work to do */
    if (len == 0) {
        cb(opaque, 0);
        return;
    }

    copy_cb = gencb_alloc(sizeof(*copy_cb), cb, opaque);
    copy_cb->s = s;
    copy_cb->offset = offset;
    copy_cb->iov.iov_base = qemu_blockalign(s->bs, len);
    copy_cb->iov.iov_len = len;
    qemu_iovec_init_external(&copy_cb->qiov, &copy_cb->iov, 1);

    qed_read_backing_file(s, pos, &copy_cb->qiov,
                          qed_copy_from_backing_file_write, copy_cb);
}

/**
 * Link one or more contiguous clusters into a table
 *
 * @s:              QED state
 * @table:          L2 table
 * @index:          First cluster index
 * @n:              Number of contiguous clusters
 * @cluster:        First cluster offset
 *
 * The cluster offset may be an allocated byte offset in the image file, the
 * zero cluster marker, or the unallocated cluster marker.
 */
static void qed_update_l2_table(BDRVQEDState *s, QEDTable *table, int index,
                                unsigned int n, uint64_t cluster)
{
    int i;
    for (i = index; i < index + n; i++) {
        table->offsets[i] = cluster;
        if (!qed_offset_is_unalloc_cluster(cluster) &&
            !qed_offset_is_zero_cluster(cluster)) {
            cluster += s->header.cluster_size;
        }
    }
}
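
/* Editor's note: a worked example of the loop above.  With 64 KB clusters,
 * linking n = 3 clusters at cluster = 0x100000 starting at index 4 yields:
 *
 *   offsets[4] = 0x100000
 *   offsets[5] = 0x110000
 *   offsets[6] = 0x120000
 *
 * whereas the zero and unallocated markers are stored unchanged in every
 * entry, since those markers are not byte offsets and must not be advanced.
 */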

static void qed_aio_complete_bh(void *opaque)
{
    QEDAIOCB *acb = opaque;
    BlockDriverCompletionFunc *cb = acb->common.cb;
    void *user_opaque = acb->common.opaque;
    int ret = acb->bh_ret;
    bool *finished = acb->finished;

    qemu_bh_delete(acb->bh);
    qemu_aio_release(acb);

    /* Invoke callback */
    cb(user_opaque, ret);

    /* Signal cancel completion */
    if (finished) {
        *finished = true;
    }
}

static void qed_aio_complete(QEDAIOCB *acb, int ret)
{
    BDRVQEDState *s = acb_to_s(acb);

    trace_qed_aio_complete(s, acb, ret);

    /* Free resources */
    qemu_iovec_destroy(&acb->cur_qiov);
    qed_unref_l2_cache_entry(acb->request.l2_table);

    /* Free the buffer we may have allocated for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        qemu_vfree(acb->qiov->iov[0].iov_base);
        acb->qiov->iov[0].iov_base = NULL;
    }

    /* Arrange for a bh to invoke the completion function */
    acb->bh_ret = ret;
    acb->bh = qemu_bh_new(qed_aio_complete_bh, acb);
    qemu_bh_schedule(acb->bh);

    /* Start next allocating write request waiting behind this one.  Note that
     * requests enqueue themselves when they first hit an unallocated cluster
     * but they wait until the entire request is finished before waking up the
     * next request in the queue.  This ensures that we don't cycle through
     * requests multiple times but rather finish one at a time completely.
     */
    if (acb == QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_REMOVE_HEAD(&s->allocating_write_reqs, next);
        acb = QSIMPLEQ_FIRST(&s->allocating_write_reqs);
        if (acb) {
            qed_aio_next_io(acb, 0);
        } else if (s->header.features & QED_F_NEED_CHECK) {
            qed_start_need_check_timer(s);
        }
    }
}

/**
 * Commit the current L2 table to the cache
 */
static void qed_commit_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    CachedL2Table *l2_table = acb->request.l2_table;
    uint64_t l2_offset = l2_table->offset;

    qed_commit_l2_cache_entry(&s->l2_cache, l2_table);

    /* This is guaranteed to succeed because we just committed the entry to the
     * cache.
     */
    acb->request.l2_table = qed_find_l2_cache_entry(&s->l2_cache, l2_offset);
    assert(acb->request.l2_table != NULL);

    qed_aio_next_io(opaque, ret);
}

/**
 * Update L1 table with new L2 table offset and write it out
 */
static void qed_aio_write_l1_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    int index;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    index = qed_l1_index(s, acb->cur_pos);
    s->l1_table->offsets[index] = acb->request.l2_table->offset;

    qed_write_l1_table(s, index, 1, qed_commit_l2_update, acb);
}

/**
 * Update L2 table with new cluster offsets and write them out
 */
static void qed_aio_write_l2_update(QEDAIOCB *acb, int ret, uint64_t offset)
{
    BDRVQEDState *s = acb_to_s(acb);
    bool need_alloc = acb->find_cluster_ret == QED_CLUSTER_L1;
    int index;

    if (ret) {
        goto err;
    }

    if (need_alloc) {
        qed_unref_l2_cache_entry(acb->request.l2_table);
        acb->request.l2_table = qed_new_l2_table(s);
    }

    index = qed_l2_index(s, acb->cur_pos);
    qed_update_l2_table(s, acb->request.l2_table->table, index,
                        acb->cur_nclusters, offset);

    if (need_alloc) {
        /* Write out the whole new L2 table */
        qed_write_l2_table(s, &acb->request, 0, s->table_nelems, true,
                           qed_aio_write_l1_update, acb);
    } else {
        /* Write out only the updated part of the L2 table */
        qed_write_l2_table(s, &acb->request, index, acb->cur_nclusters, false,
                           qed_aio_next_io, acb);
    }
    return;

err:
    qed_aio_complete(acb, ret);
}

static void qed_aio_write_l2_update_cb(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    qed_aio_write_l2_update(acb, ret, acb->cur_cluster);
}

/**
 * Flush new data clusters before updating the L2 table
 *
 * This flush is necessary when a backing file is in use.  A crash during an
 * allocating write could result in empty clusters in the image.  If the write
 * only touched a subregion of the cluster, then backing image sectors have
 * been lost in the untouched region.  The solution is to flush after writing a
 * new data cluster and before updating the L2 table.
 */
static void qed_aio_write_flush_before_l2_update(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);

    if (!bdrv_aio_flush(s->bs->file, qed_aio_write_l2_update_cb, opaque)) {
        qed_aio_complete(acb, -EIO);
    }
}

/**
 * Write data to the image file
 */
static void qed_aio_write_main(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos);
    BlockDriverCompletionFunc *next_fn;

    trace_qed_aio_write_main(s, acb, ret, offset, acb->cur_qiov.size);

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    if (acb->find_cluster_ret == QED_CLUSTER_FOUND) {
        next_fn = qed_aio_next_io;
    } else {
        if (s->bs->backing_hd) {
            next_fn = qed_aio_write_flush_before_l2_update;
        } else {
            next_fn = qed_aio_write_l2_update_cb;
        }
    }

    BLKDBG_EVENT(s->bs->file, BLKDBG_WRITE_AIO);
    bdrv_aio_writev(s->bs->file, offset / BDRV_SECTOR_SIZE,
                    &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                    next_fn, acb);
}
/**
 * Populate the untouched region at the back of a new data cluster
 */
static void qed_aio_write_postfill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = acb->cur_pos + acb->cur_qiov.size;
    uint64_t len =
        qed_start_of_cluster(s, start + s->header.cluster_size - 1) - start;
    uint64_t offset = acb->cur_cluster +
                      qed_offset_into_cluster(s, acb->cur_pos) +
                      acb->cur_qiov.size;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    trace_qed_aio_write_postfill(s, acb, start, len, offset);
    qed_copy_from_backing_file(s, start, len, offset,
                               qed_aio_write_main, acb);
}

/**
 * Populate the untouched region at the front of a new data cluster
 */
static void qed_aio_write_prefill(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    uint64_t start = qed_start_of_cluster(s, acb->cur_pos);
    uint64_t len = qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_write_prefill(s, acb, start, len, acb->cur_cluster);
    qed_copy_from_backing_file(s, start, len, acb->cur_cluster,
                               qed_aio_write_postfill, acb);
}
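
/* Editor's note: how prefill and postfill bracket a write into newly
 * allocated clusters when it does not start or end on a cluster boundary:
 *
 *   |<--------------- newly allocated cluster(s) --------------->|
 *   | prefill (backing copy) | write data | postfill (backing copy) |
 *
 * Both fills are copies from the backing file (or zeroes when there is
 * none), so the guest-visible contents of the untouched regions survive
 * the allocation.
 */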

/**
 * Check if the QED_F_NEED_CHECK bit should be set during allocating write
 */
static bool qed_should_set_need_check(BDRVQEDState *s)
{
    /* The flush before L2 update path ensures consistency */
    if (s->bs->backing_hd) {
        return false;
    }

    return !(s->header.features & QED_F_NEED_CHECK);
}

static void qed_aio_write_zero_cluster(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;

    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

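    /* Editor's note: offset 1 is the zero cluster marker defined by the QED
     * format, not a byte offset (see qed_offset_is_zero_cluster()).
     */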
    qed_aio_write_l2_update(acb, 0, 1);
}

/**
 * Write new data cluster
 *
 * @acb:        Write request
 * @len:        Length in bytes
 *
 * This path is taken when writing to previously unallocated clusters.
 */
static void qed_aio_write_alloc(QEDAIOCB *acb, size_t len)
{
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverCompletionFunc *cb;

    /* Cancel timer when the first allocating request comes in */
    if (QSIMPLEQ_EMPTY(&s->allocating_write_reqs)) {
        qed_cancel_need_check_timer(s);
    }

    /* Freeze this request if another allocating write is in progress */
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs)) {
        QSIMPLEQ_INSERT_TAIL(&s->allocating_write_reqs, acb, next);
    }
    if (acb != QSIMPLEQ_FIRST(&s->allocating_write_reqs) ||
        s->allocating_write_reqs_plugged) {
        return; /* wait for existing request to finish */
    }

    acb->cur_nclusters = qed_bytes_to_clusters(s,
            qed_offset_into_cluster(s, acb->cur_pos) + len);
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    if (acb->flags & QED_AIOCB_ZERO) {
        /* Skip ahead if the clusters are already zero */
        if (acb->find_cluster_ret == QED_CLUSTER_ZERO) {
            qed_aio_next_io(acb, 0);
            return;
        }

        cb = qed_aio_write_zero_cluster;
    } else {
        cb = qed_aio_write_prefill;
        acb->cur_cluster = qed_alloc_clusters(s, acb->cur_nclusters);
    }

    if (qed_should_set_need_check(s)) {
        s->header.features |= QED_F_NEED_CHECK;
        qed_write_header(s, cb, acb);
    } else {
        cb(acb, 0);
    }
}

/**
 * Write data cluster in place
 *
 * @acb:        Write request
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * This path is taken when writing to already allocated clusters.
 */
static void qed_aio_write_inplace(QEDAIOCB *acb, uint64_t offset, size_t len)
{
    /* Allocate buffer for zero writes */
    if (acb->flags & QED_AIOCB_ZERO) {
        struct iovec *iov = acb->qiov->iov;

        if (!iov->iov_base) {
            iov->iov_base = qemu_blockalign(acb->common.bs, iov->iov_len);
            memset(iov->iov_base, 0, iov->iov_len);
        }
    }

    /* Calculate the I/O vector */
    acb->cur_cluster = offset;
    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Do the actual write */
    qed_aio_write_main(acb, 0);
}

/**
 * Write data cluster
 *
 * @opaque:     Write request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_write_data(void *opaque, int ret,
                               uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;

    trace_qed_aio_write_data(acb_to_s(acb), acb, ret, offset, len);

    acb->find_cluster_ret = ret;

    switch (ret) {
    case QED_CLUSTER_FOUND:
        qed_aio_write_inplace(acb, offset, len);
        break;

    case QED_CLUSTER_L2:
    case QED_CLUSTER_L1:
    case QED_CLUSTER_ZERO:
        qed_aio_write_alloc(acb, len);
        break;

    default:
        qed_aio_complete(acb, ret);
        break;
    }
}

/**
 * Read data cluster
 *
 * @opaque:     Read request
 * @ret:        QED_CLUSTER_FOUND, QED_CLUSTER_L2, QED_CLUSTER_L1,
 *              or -errno
 * @offset:     Cluster offset in bytes
 * @len:        Length in bytes
 *
 * Callback from qed_find_cluster().
 */
static void qed_aio_read_data(void *opaque, int ret,
                              uint64_t offset, size_t len)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    BlockDriverState *bs = acb->common.bs;

    /* Adjust offset into cluster */
    offset += qed_offset_into_cluster(s, acb->cur_pos);

    trace_qed_aio_read_data(s, acb, ret, offset, len);

    if (ret < 0) {
        goto err;
    }

    qemu_iovec_concat(&acb->cur_qiov, acb->qiov, acb->qiov_offset, len);

    /* Handle zero cluster and backing file reads */
    if (ret == QED_CLUSTER_ZERO) {
        qemu_iovec_memset(&acb->cur_qiov, 0, 0, acb->cur_qiov.size);
        qed_aio_next_io(acb, 0);
        return;
    } else if (ret != QED_CLUSTER_FOUND) {
        qed_read_backing_file(s, acb->cur_pos, &acb->cur_qiov,
                              qed_aio_next_io, acb);
        return;
    }

    BLKDBG_EVENT(bs->file, BLKDBG_READ_AIO);
    bdrv_aio_readv(bs->file, offset / BDRV_SECTOR_SIZE,
                   &acb->cur_qiov, acb->cur_qiov.size / BDRV_SECTOR_SIZE,
                   qed_aio_next_io, acb);
    return;

err:
    qed_aio_complete(acb, ret);
}

/**
 * Begin next I/O or complete the request
 */
static void qed_aio_next_io(void *opaque, int ret)
{
    QEDAIOCB *acb = opaque;
    BDRVQEDState *s = acb_to_s(acb);
    QEDFindClusterFunc *io_fn = (acb->flags & QED_AIOCB_WRITE) ?
                                qed_aio_write_data : qed_aio_read_data;

    trace_qed_aio_next_io(s, acb, ret, acb->cur_pos + acb->cur_qiov.size);

    /* Handle I/O error */
    if (ret) {
        qed_aio_complete(acb, ret);
        return;
    }

    acb->qiov_offset += acb->cur_qiov.size;
    acb->cur_pos += acb->cur_qiov.size;
    qemu_iovec_reset(&acb->cur_qiov);

    /* Complete request */
    if (acb->cur_pos >= acb->end_pos) {
        qed_aio_complete(acb, 0);
        return;
    }

    /* Find next cluster and start I/O */
    qed_find_cluster(s, &acb->request,
                     acb->cur_pos, acb->end_pos - acb->cur_pos,
                     io_fn, acb);
}
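
/* Editor's note: qed_aio_next_io() is the hub of the request state machine.
 * Each request loops through:
 *
 *   qed_aio_setup() -> qed_aio_next_io() -> qed_find_cluster()
 *     -> qed_aio_read_data() / qed_aio_write_data() -> I/O + table updates
 *     -> qed_aio_next_io() (advance cur_pos) -> ...
 *     -> qed_aio_complete() once cur_pos reaches end_pos
 *
 * so a request spanning several clusters is processed one contiguous
 * cluster range at a time.
 */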

static BlockDriverAIOCB *qed_aio_setup(BlockDriverState *bs,
                                       int64_t sector_num,
                                       QEMUIOVector *qiov, int nb_sectors,
                                       BlockDriverCompletionFunc *cb,
                                       void *opaque, int flags)
{
    QEDAIOCB *acb = qemu_aio_get(&qed_aiocb_info, bs, cb, opaque);

    trace_qed_aio_setup(bs->opaque, acb, sector_num, nb_sectors,
                        opaque, flags);

    acb->flags = flags;
    acb->finished = NULL;
    acb->qiov = qiov;
    acb->qiov_offset = 0;
    acb->cur_pos = (uint64_t)sector_num * BDRV_SECTOR_SIZE;
    acb->end_pos = acb->cur_pos + nb_sectors * BDRV_SECTOR_SIZE;
    acb->request.l2_table = NULL;
    qemu_iovec_init(&acb->cur_qiov, qiov->niov);

    /* Start request */
    qed_aio_next_io(acb, 0);
    return &acb->common;
}

static BlockDriverAIOCB *bdrv_qed_aio_readv(BlockDriverState *bs,
                                            int64_t sector_num,
                                            QEMUIOVector *qiov, int nb_sectors,
                                            BlockDriverCompletionFunc *cb,
                                            void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb, opaque, 0);
}

static BlockDriverAIOCB *bdrv_qed_aio_writev(BlockDriverState *bs,
                                             int64_t sector_num,
                                             QEMUIOVector *qiov, int nb_sectors,
                                             BlockDriverCompletionFunc *cb,
                                             void *opaque)
{
    return qed_aio_setup(bs, sector_num, qiov, nb_sectors, cb,
                         opaque, QED_AIOCB_WRITE);
}

typedef struct {
    Coroutine *co;
    int ret;
    bool done;
} QEDWriteZeroesCB;

static void coroutine_fn qed_co_write_zeroes_cb(void *opaque, int ret)
{
    QEDWriteZeroesCB *cb = opaque;

    cb->done = true;
    cb->ret = ret;
    if (cb->co) {
        qemu_coroutine_enter(cb->co, NULL);
    }
}

static int coroutine_fn bdrv_qed_co_write_zeroes(BlockDriverState *bs,
                                                 int64_t sector_num,
                                                 int nb_sectors)
{
    BlockDriverAIOCB *blockacb;
    BDRVQEDState *s = bs->opaque;
    QEDWriteZeroesCB cb = { .done = false };
    QEMUIOVector qiov;
    struct iovec iov;

    /* Refuse if there are untouched backing file sectors */
    if (bs->backing_hd) {
        if (qed_offset_into_cluster(s, sector_num * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
        if (qed_offset_into_cluster(s, nb_sectors * BDRV_SECTOR_SIZE) != 0) {
            return -ENOTSUP;
        }
    }

    /* Zero writes start without an I/O buffer.  If a buffer becomes necessary
     * then it will be allocated during request processing.
     */
    iov.iov_base = NULL;
    iov.iov_len  = nb_sectors * BDRV_SECTOR_SIZE;

    qemu_iovec_init_external(&qiov, &iov, 1);
    blockacb = qed_aio_setup(bs, sector_num, &qiov, nb_sectors,
                             qed_co_write_zeroes_cb, &cb,
                             QED_AIOCB_WRITE | QED_AIOCB_ZERO);
    if (!blockacb) {
        return -EIO;
    }
    if (!cb.done) {
        cb.co = qemu_coroutine_self();
        qemu_coroutine_yield();
    }
    assert(cb.done);
    return cb.ret;
}

static int bdrv_qed_truncate(BlockDriverState *bs, int64_t offset)
{
    BDRVQEDState *s = bs->opaque;
    uint64_t old_image_size;
    int ret;

    if (!qed_is_image_size_valid(offset, s->header.cluster_size,
                                 s->header.table_size)) {
        return -EINVAL;
    }

    /* Shrinking is currently not supported */
    if ((uint64_t)offset < s->header.image_size) {
        return -ENOTSUP;
    }

    old_image_size = s->header.image_size;
    s->header.image_size = offset;
    ret = qed_write_header_sync(s);
    if (ret < 0) {
        s->header.image_size = old_image_size;
    }
    return ret;
}

static int64_t bdrv_qed_getlength(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;
    return s->header.image_size;
}

static int bdrv_qed_get_info(BlockDriverState *bs, BlockDriverInfo *bdi)
{
    BDRVQEDState *s = bs->opaque;

    memset(bdi, 0, sizeof(*bdi));
    bdi->cluster_size = s->header.cluster_size;
    bdi->is_dirty = s->header.features & QED_F_NEED_CHECK;
    return 0;
}

static int bdrv_qed_change_backing_file(BlockDriverState *bs,
                                        const char *backing_file,
                                        const char *backing_fmt)
{
    BDRVQEDState *s = bs->opaque;
    QEDHeader new_header, le_header;
    void *buffer;
    size_t buffer_len, backing_file_len;
    int ret;

    /* Refuse to set backing filename if unknown compat feature bits are
     * active.  If the image uses an unknown compat feature then we may not
     * know the layout of data following the header structure and cannot safely
     * add a new string.
     */
    if (backing_file && (s->header.compat_features &
                         ~QED_COMPAT_FEATURE_MASK)) {
        return -ENOTSUP;
    }

    memcpy(&new_header, &s->header, sizeof(new_header));

    new_header.features &= ~(QED_F_BACKING_FILE |
                             QED_F_BACKING_FORMAT_NO_PROBE);

    /* Adjust feature flags */
    if (backing_file) {
        new_header.features |= QED_F_BACKING_FILE;

        if (qed_fmt_is_raw(backing_fmt)) {
            new_header.features |= QED_F_BACKING_FORMAT_NO_PROBE;
        }
    }

    /* Calculate new header size */
    backing_file_len = 0;

    if (backing_file) {
        backing_file_len = strlen(backing_file);
    }

    buffer_len = sizeof(new_header);
    new_header.backing_filename_offset = buffer_len;
    new_header.backing_filename_size = backing_file_len;
    buffer_len += backing_file_len;

    /* Make sure we can rewrite header without failing */
    if (buffer_len > new_header.header_size * new_header.cluster_size) {
        return -ENOSPC;
    }

    /* Prepare new header */
    buffer = g_malloc(buffer_len);

    qed_header_cpu_to_le(&new_header, &le_header);
    memcpy(buffer, &le_header, sizeof(le_header));
    buffer_len = sizeof(le_header);

    if (backing_file) {
        memcpy(buffer + buffer_len, backing_file, backing_file_len);
        buffer_len += backing_file_len;
    }

    /* Write new header */
    ret = bdrv_pwrite_sync(bs->file, 0, buffer, buffer_len);
    g_free(buffer);
    if (ret == 0) {
        memcpy(&s->header, &new_header, sizeof(new_header));
    }
    return ret;
}
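
/* Editor's note: the rewritten header buffer assembled above is laid out as:
 *
 *   [ QEDHeader (sizeof(QEDHeader) bytes, little-endian) ][ backing filename ]
 *
 * and must fit within header_size clusters, hence the -ENOSPC check.  The
 * backing filename is stored without a trailing NUL; its length lives in
 * backing_filename_size.
 */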

static void bdrv_qed_invalidate_cache(BlockDriverState *bs)
{
    BDRVQEDState *s = bs->opaque;

    bdrv_qed_close(bs);
    memset(s, 0, sizeof(BDRVQEDState));
    bdrv_qed_open(bs, bs->open_flags);
}

static int bdrv_qed_check(BlockDriverState *bs, BdrvCheckResult *result,
                          BdrvCheckMode fix)
{
    BDRVQEDState *s = bs->opaque;

    return qed_check(s, result, !!fix);
}

static QEMUOptionParameter qed_create_options[] = {
    {
        .name = BLOCK_OPT_SIZE,
        .type = OPT_SIZE,
        .help = "Virtual disk size (in bytes)"
    }, {
        .name = BLOCK_OPT_BACKING_FILE,
        .type = OPT_STRING,
        .help = "File name of a base image"
    }, {
        .name = BLOCK_OPT_BACKING_FMT,
        .type = OPT_STRING,
        .help = "Image format of the base image"
    }, {
        .name = BLOCK_OPT_CLUSTER_SIZE,
        .type = OPT_SIZE,
        .help = "Cluster size (in bytes)",
        .value = { .n = QED_DEFAULT_CLUSTER_SIZE },
    }, {
        .name = BLOCK_OPT_TABLE_SIZE,
        .type = OPT_SIZE,
        .help = "L1/L2 table size (in clusters)"
    },
    { /* end of list */ }
};

static BlockDriver bdrv_qed = {
    .format_name              = "qed",
    .instance_size            = sizeof(BDRVQEDState),
    .create_options           = qed_create_options,

    .bdrv_probe               = bdrv_qed_probe,
    .bdrv_rebind              = bdrv_qed_rebind,
    .bdrv_open                = bdrv_qed_open,
    .bdrv_close               = bdrv_qed_close,
    .bdrv_reopen_prepare      = bdrv_qed_reopen_prepare,
    .bdrv_create              = bdrv_qed_create,
    .bdrv_co_is_allocated     = bdrv_qed_co_is_allocated,
    .bdrv_make_empty          = bdrv_qed_make_empty,
    .bdrv_aio_readv           = bdrv_qed_aio_readv,
    .bdrv_aio_writev          = bdrv_qed_aio_writev,
    .bdrv_co_write_zeroes     = bdrv_qed_co_write_zeroes,
    .bdrv_truncate            = bdrv_qed_truncate,
    .bdrv_getlength           = bdrv_qed_getlength,
    .bdrv_get_info            = bdrv_qed_get_info,
    .bdrv_change_backing_file = bdrv_qed_change_backing_file,
    .bdrv_invalidate_cache    = bdrv_qed_invalidate_cache,
    .bdrv_check               = bdrv_qed_check,
};

static void bdrv_qed_init(void)
{
    bdrv_register(&bdrv_qed);
}

block_init(bdrv_qed_init);