qemu/block/linux-aio.c
/*
 * Linux native AIO support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include "block/aio.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/event_notifier.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"

#include <libaio.h>

/*
 * Queue size (per-device).
 *
 * XXX: eventually we need to communicate this to the guest and/or make it
 *      tunable by the guest.  If we get more outstanding requests at a time
 *      than this, we will get EAGAIN from io_submit, which is communicated
 *      to the guest as an I/O error.
 */
#define MAX_EVENTS 1024

/* Default maximum number of requests in a batch. */
#define DEFAULT_MAX_BATCH 32

struct qemu_laiocb {
    Coroutine *co;
    LinuxAioState *ctx;
    struct iocb iocb;
    ssize_t ret;
    size_t nbytes;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(qemu_laiocb) next;
};

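/*
 * Submission queue state.  in_queue counts requests on the pending list
 * that have not been passed to io_submit() yet; in_flight counts requests
 * the kernel has accepted but not yet completed; blocked is set when
 * io_submit() returns EAGAIN, pausing further submission until
 * completions free up slots.
 */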
typedef struct {
    int plugged;
    unsigned int in_queue;
    unsigned int in_flight;
    bool blocked;
    QSIMPLEQ_HEAD(, qemu_laiocb) pending;
} LaioQueue;

struct LinuxAioState {
    AioContext *aio_context;

    io_context_t ctx;
    EventNotifier e;

    /* I/O queue for batched submission.  Protected by the AioContext lock. */
    LaioQueue io_q;

    /* I/O completion processing.  Only runs in the I/O thread.  */
    QEMUBH *completion_bh;
    int event_idx;
    int event_max;
};

static void ioq_submit(LinuxAioState *s);

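/* Reassemble the 64-bit request result (byte count on success, negative
 * errno on failure) from the res (low) and res2 (high) halves of a
 * completed io_event. */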
static inline ssize_t io_event_ret(struct io_event *ev)
{
    return (ssize_t)(((uint64_t)ev->res2 << 32) | ev->res);
}

/*
 * Completes an AIO request.
 */
static void qemu_laio_process_completion(struct qemu_laiocb *laiocb)
{
    int ret;

    ret = laiocb->ret;
    if (ret != -ECANCELED) {
        if (ret == laiocb->nbytes) {
            ret = 0;
        } else if (ret >= 0) {
            /* Short reads mean EOF, pad with zeros. */
            if (laiocb->is_read) {
                qemu_iovec_memset(laiocb->qiov, ret, 0,
                    laiocb->qiov->size - ret);
            } else {
                ret = -ENOSPC;
            }
        }
    }

    laiocb->ret = ret;

    /*
     * If the coroutine is already entered it must be in ioq_submit() and
     * will notice laiocb->ret has been filled in when it eventually runs
     * later.  Coroutines cannot be entered recursively so avoid doing
     * that!
     */
    if (!qemu_coroutine_entered(laiocb->co)) {
        aio_co_wake(laiocb->co);
    }
}

/**
 * The aio_ring buffer, which is shared between userspace and the kernel.
 *
 * This was copied from linux/fs/aio.c; a common header does not exist,
 * but AIO has existed for ages so we assume the ABI is stable.
 */
struct aio_ring {
    unsigned    id;    /* kernel internal index number */
    unsigned    nr;    /* number of io_events */
    unsigned    head;  /* Written to by userland or by kernel. */
    unsigned    tail;

    unsigned    magic;
    unsigned    compat_features;
    unsigned    incompat_features;
    unsigned    header_length;  /* size of aio_ring */

    struct io_event io_events[];
};

/**
 * io_getevents_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 *
 * Returns the number of completed events and sets a pointer
 * to the events array.  This function does not update the internal
 * ring buffer; it only reads head and tail.  When @events has been
 * processed, io_getevents_commit() must be called.
 */
static inline unsigned int io_getevents_peek(io_context_t ctx,
                                             struct io_event **events)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;
    unsigned int head = ring->head, tail = ring->tail;
    unsigned int nr;

    nr = tail >= head ? tail - head : ring->nr - head;
    *events = ring->io_events + head;
    /* Prevent speculative loads of events[i] before observing tail.
       Paired with smp_wmb() inside linux/fs/aio.c: aio_complete(). */
    smp_rmb();

    return nr;
}

/**
 * io_getevents_commit:
 * @ctx: AIO context
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer.
 */
static inline void io_getevents_commit(io_context_t ctx, unsigned int nr)
{
    struct aio_ring *ring = (struct aio_ring *)ctx;

    if (nr) {
        ring->head = (ring->head + nr) % ring->nr;
    }
}

/**
 * io_getevents_advance_and_peek:
 * @ctx: AIO context
 * @events: pointer to the events array, output value
 * @nr: the number of events by which head should be advanced
 *
 * Advances the head of the ring buffer and returns the number of
 * elements left.
 */
static inline unsigned int
io_getevents_advance_and_peek(io_context_t ctx,
                              struct io_event **events,
                              unsigned int nr)
{
    io_getevents_commit(ctx, nr);
    return io_getevents_peek(ctx, events);
}

/**
 * qemu_laio_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests and invokes their callbacks.
 *
 * The function is somewhat tricky because it supports nested event loops,
 * for example when a request callback invokes aio_poll().  In order to do
 * this, indices are kept in LinuxAioState.  The function schedules the
 * completion BH so that it can be called again from a nested event loop.
 * When there are no events left to complete, the BH is canceled.
 */
static void qemu_laio_process_completions(LinuxAioState *s)
{
    struct io_event *events;

    /* Reschedule so nested event loops see currently pending completions */
    qemu_bh_schedule(s->completion_bh);

    while ((s->event_max = io_getevents_advance_and_peek(s->ctx, &events,
                                                         s->event_idx))) {
        for (s->event_idx = 0; s->event_idx < s->event_max; ) {
            struct iocb *iocb = events[s->event_idx].obj;
            struct qemu_laiocb *laiocb =
                container_of(iocb, struct qemu_laiocb, iocb);

            laiocb->ret = io_event_ret(&events[s->event_idx]);

            /* Change counters one-by-one because we can be nested. */
            s->io_q.in_flight--;
            s->event_idx++;
            qemu_laio_process_completion(laiocb);
        }
    }

    qemu_bh_cancel(s->completion_bh);

    /* If we are nested, we have to notify the level above that we are done
     * by setting event_max to zero; the upper level will then jump out of
     * its own `for` loop.  If we are the last level, all counters have
     * dropped to zero. */
    s->event_max = 0;
    s->event_idx = 0;
}

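/* Drain completions and, unless the queue is plugged, submit any pending
 * requests.  Takes the AioContext lock around both steps. */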
static void qemu_laio_process_completions_and_submit(LinuxAioState *s)
{
    aio_context_acquire(s->aio_context);
    qemu_laio_process_completions(s);

    if (!s->io_q.plugged && !QSIMPLEQ_EMPTY(&s->io_q.pending)) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

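/* Completion BH: lets nested event loops drain completions that the
 * outer call to qemu_laio_process_completions() has not consumed yet. */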
static void qemu_laio_completion_bh(void *opaque)
{
    LinuxAioState *s = opaque;

    qemu_laio_process_completions_and_submit(s);
}

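/* Event notifier callback: the kernel signalled the eventfd to report
 * newly completed io_events. */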
static void qemu_laio_completion_cb(EventNotifier *e)
{
    LinuxAioState *s = container_of(e, LinuxAioState, e);

    if (event_notifier_test_and_clear(&s->e)) {
        qemu_laio_process_completions_and_submit(s);
    }
}

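/* Polling callback: peek at the completion ring without blocking or
 * touching the eventfd; process completions if any are ready. */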
static bool qemu_laio_poll_cb(void *opaque)
{
    EventNotifier *e = opaque;
    LinuxAioState *s = container_of(e, LinuxAioState, e);
    struct io_event *events;

    if (!io_getevents_peek(s->ctx, &events)) {
        return false;
    }

    qemu_laio_process_completions_and_submit(s);
    return true;
}

static void ioq_init(LaioQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->pending);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

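/* Submit as many pending requests as the MAX_EVENTS in-flight limit
 * allows.  On a partial submission the submitted prefix is split off the
 * pending list; on a hard error the head request is failed and the rest
 * are retried; on EAGAIN the queue is marked blocked. */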
static void ioq_submit(LinuxAioState *s)
{
    int ret, len;
    struct qemu_laiocb *aiocb;
    struct iocb *iocbs[MAX_EVENTS];
    QSIMPLEQ_HEAD(, qemu_laiocb) completed;

    do {
        if (s->io_q.in_flight >= MAX_EVENTS) {
            break;
        }
        len = 0;
        QSIMPLEQ_FOREACH(aiocb, &s->io_q.pending, next) {
            iocbs[len++] = &aiocb->iocb;
            if (s->io_q.in_flight + len >= MAX_EVENTS) {
                break;
            }
        }

        ret = io_submit(s->ctx, len, iocbs);
        if (ret == -EAGAIN) {
            break;
        }
        if (ret < 0) {
            /* Fail the first request, retry the rest */
            aiocb = QSIMPLEQ_FIRST(&s->io_q.pending);
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.pending, next);
            s->io_q.in_queue--;
            aiocb->ret = ret;
            qemu_laio_process_completion(aiocb);
            continue;
        }

        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
        aiocb = container_of(iocbs[ret - 1], struct qemu_laiocb, iocb);
        QSIMPLEQ_SPLIT_AFTER(&s->io_q.pending, aiocb, next, &completed);
    } while (ret == len && !QSIMPLEQ_EMPTY(&s->io_q.pending));
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /* We can try to complete something right away if there are
         * still requests in flight. */
        qemu_laio_process_completions(s);
        /*
         * Even if we have completed everything (in_flight == 0), the queue
         * can still have pending requests (in_queue > 0).  We do not
         * attempt to repeat submission to avoid an I/O hang.  The reason is
         * simple: s->e is still set and the completion callback will be
         * called shortly, and all pending requests will be submitted from
         * there.
         */
    }
}

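/* Effective batch limit: the AioContext-wide aio_max_batch (or
 * DEFAULT_MAX_BATCH), optionally reduced by the per-device limit and by
 * the number of free in-flight slots. */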
static uint64_t laio_max_batch(LinuxAioState *s, uint64_t dev_max_batch)
{
    uint64_t max_batch = s->aio_context->aio_max_batch ?: DEFAULT_MAX_BATCH;

    /*
     * AIO context can be shared between multiple block devices, so
     * `dev_max_batch` allows reducing the batch size for latency-sensitive
     * devices.
     */
    max_batch = MIN_NON_ZERO(dev_max_batch, max_batch);

    /* limit the batch with the number of available events */
    max_batch = MIN_NON_ZERO(MAX_EVENTS - s->io_q.in_flight, max_batch);

    return max_batch;
}

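/* Plugging defers submission so adjacent requests can be batched;
 * unplugging (or hitting the batch limit while plugged) flushes the
 * pending queue. */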
void laio_io_plug(BlockDriverState *bs, LinuxAioState *s)
{
    s->io_q.plugged++;
}

void laio_io_unplug(BlockDriverState *bs, LinuxAioState *s,
                    uint64_t dev_max_batch)
{
    assert(s->io_q.plugged);
    if (s->io_q.in_queue >= laio_max_batch(s, dev_max_batch) ||
        (--s->io_q.plugged == 0 &&
         !s->io_q.blocked && !QSIMPLEQ_EMPTY(&s->io_q.pending))) {
        ioq_submit(s);
    }
}

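/* Prepare the iocb for this request, queue it, and kick off a submission
 * if the queue is unplugged or the batch limit has been reached. */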
static int laio_do_submit(int fd, struct qemu_laiocb *laiocb, off_t offset,
                          int type, uint64_t dev_max_batch)
{
    LinuxAioState *s = laiocb->ctx;
    struct iocb *iocbs = &laiocb->iocb;
    QEMUIOVector *qiov = laiocb->qiov;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_prep_pwritev(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_prep_preadv(iocbs, fd, qiov->iov, qiov->niov, offset);
        break;
    /* Currently the Linux kernel does not support other operations */
    default:
        fprintf(stderr, "%s: invalid AIO request type 0x%x.\n",
                        __func__, type);
        return -EIO;
    }
    io_set_eventfd(&laiocb->iocb, event_notifier_get_fd(&s->e));

    QSIMPLEQ_INSERT_TAIL(&s->io_q.pending, laiocb, next);
    s->io_q.in_queue++;
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_queue >= laio_max_batch(s, dev_max_batch))) {
        ioq_submit(s);
    }

    return 0;
}

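/*
 * Coroutine entry point: set up an on-stack qemu_laiocb, submit it, and
 * yield until the completion path fills in laiocb.ret and wakes the
 * coroutine.  A sketch of a call site (argument names are illustrative;
 * the in-tree caller lives in block/file-posix.c):
 *
 *     ret = laio_co_submit(bs, aio_state, fd, offset, qiov,
 *                          QEMU_AIO_READ, 0);
 */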
int coroutine_fn laio_co_submit(BlockDriverState *bs, LinuxAioState *s, int fd,
                                uint64_t offset, QEMUIOVector *qiov, int type,
                                uint64_t dev_max_batch)
{
    int ret;
    struct qemu_laiocb laiocb = {
        .co         = qemu_coroutine_self(),
        .nbytes     = qiov->size,
        .ctx        = s,
        .ret        = -EINPROGRESS,
        .is_read    = (type == QEMU_AIO_READ),
        .qiov       = qiov,
    };

    ret = laio_do_submit(fd, &laiocb, offset, type, dev_max_batch);
    if (ret < 0) {
        return ret;
    }

    if (laiocb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return laiocb.ret;
}

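/* Move the completion machinery (event notifier, completion BH) between
 * AioContexts when the state is detached and re-attached. */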
void laio_detach_aio_context(LinuxAioState *s, AioContext *old_context)
{
    aio_set_event_notifier(old_context, &s->e, false, NULL, NULL);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

void laio_attach_aio_context(LinuxAioState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_laio_completion_bh, s);
    aio_set_event_notifier(new_context, &s->e, false,
                           qemu_laio_completion_cb,
                           qemu_laio_poll_cb);
}

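/* Allocate a LinuxAioState: create the eventfd notifier and a kernel AIO
 * context sized for MAX_EVENTS in-flight requests. */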
LinuxAioState *laio_init(Error **errp)
{
    int rc;
    LinuxAioState *s;

    s = g_malloc0(sizeof(*s));
    rc = event_notifier_init(&s->e, false);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to initialize event notifier");
        goto out_free_state;
    }

    rc = io_setup(MAX_EVENTS, &s->ctx);
    if (rc < 0) {
        error_setg_errno(errp, -rc, "failed to create linux AIO context");
        goto out_close_efd;
    }

    ioq_init(&s->io_q);

    return s;

out_close_efd:
    event_notifier_cleanup(&s->e);
out_free_state:
    g_free(s);
    return NULL;
}

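/* Tear down in reverse order of laio_init(); an io_destroy() failure is
 * reported but not fatal. */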
void laio_cleanup(LinuxAioState *s)
{
    event_notifier_cleanup(&s->e);

    if (io_destroy(s->ctx) != 0) {
        fprintf(stderr, "%s: destroy AIO context %p failed\n",
                        __func__, &s->ctx);
    }
    g_free(s);
}