qemu/block/io_uring.c
/*
 * Linux io_uring support.
 *
 * Copyright (C) 2009 IBM, Corp.
 * Copyright (C) 2009 Red Hat, Inc.
 * Copyright (C) 2019 Aarushi Mehta
 *
 * This work is licensed under the terms of the GNU GPL, version 2 or later.
 * See the COPYING file in the top-level directory.
 */
#include "qemu/osdep.h"
#include <liburing.h>
#include "block/aio.h"
#include "qemu/error-report.h"
#include "qemu/queue.h"
#include "block/block.h"
#include "block/raw-aio.h"
#include "qemu/coroutine.h"
#include "qapi/error.h"
#include "trace.h"

/* io_uring ring size */
#define MAX_ENTRIES 128

typedef struct LuringAIOCB {
    Coroutine *co;
    struct io_uring_sqe sqeq;
    ssize_t ret;
    QEMUIOVector *qiov;
    bool is_read;
    QSIMPLEQ_ENTRY(LuringAIOCB) next;

    /*
     * Buffered reads may require resubmission, see
     * luring_resubmit_short_read().
     */
    int total_read;
    QEMUIOVector resubmit_qiov;
} LuringAIOCB;

typedef struct LuringQueue {
    int plugged;                /* submission is deferred while > 0 */
    unsigned int in_queue;      /* requests waiting in submit_queue */
    unsigned int in_flight;     /* requests submitted to the kernel */
    bool blocked;               /* ring was full on the last submission */
    QSIMPLEQ_HEAD(, LuringAIOCB) submit_queue;
} LuringQueue;

typedef struct LuringState {
    AioContext *aio_context;

    struct io_uring ring;

    /* I/O queue for batched submission.  Protected by the AioContext lock. */
    LuringQueue io_q;

    /* I/O completion processing.  Only runs in the I/O thread.  */
    QEMUBH *completion_bh;
} LuringState;

/**
 * luring_resubmit:
 *
 * Resubmit a request by appending it to submit_queue.  The caller must ensure
 * that ioq_submit() is called later so that submit_queue requests are started.
 */
static void luring_resubmit(LuringState *s, LuringAIOCB *luringcb)
{
    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
}

/**
 * luring_resubmit_short_read:
 *
 * Short reads are rare but may occur. The remaining read request needs to be
 * resubmitted.
 */
static void luring_resubmit_short_read(LuringState *s, LuringAIOCB *luringcb,
                                       int nread)
{
    QEMUIOVector *resubmit_qiov;
    size_t remaining;

    trace_luring_resubmit_short_read(s, luringcb, nread);

    /* Update read position */
    luringcb->total_read += nread;
    remaining = luringcb->qiov->size - luringcb->total_read;

    /* Shorten qiov */
    resubmit_qiov = &luringcb->resubmit_qiov;
    if (resubmit_qiov->iov == NULL) {
        qemu_iovec_init(resubmit_qiov, luringcb->qiov->niov);
    } else {
        qemu_iovec_reset(resubmit_qiov);
    }
    qemu_iovec_concat(resubmit_qiov, luringcb->qiov, luringcb->total_read,
                      remaining);

    /* Update sqe */
    luringcb->sqeq.off += nread;
    luringcb->sqeq.addr = (__u64)(uintptr_t)luringcb->resubmit_qiov.iov;
    luringcb->sqeq.len = luringcb->resubmit_qiov.niov;

    luring_resubmit(s, luringcb);
}

/**
 * luring_process_completions:
 * @s: AIO state
 *
 * Fetches completed I/O requests, consumes cqes, and invokes their callbacks.
 * The function is somewhat tricky because it supports nested event loops, for
 * example when a request callback invokes aio_poll().
 *
 * The function schedules the completion BH so that it can be called again in
 * a nested event loop.  When there are no events left to complete, the BH is
 * canceled.
 */
static void luring_process_completions(LuringState *s)
{
    struct io_uring_cqe *cqes;
    int total_bytes;
    /*
     * Request completion callbacks can run the nested event loop.
     * Schedule ourselves so the nested event loop will "see" remaining
     * completed requests and process them.  Without this, completion
     * callbacks that wait for other requests using a nested event loop
     * would hang forever.
     *
     * This workaround is needed because io_uring uses poll_wait, which
     * is woken up when new events are added to the uring, thus polling on
     * the same uring fd will block unless more events are received.
     *
     * Other leaf block drivers (drivers that access the data themselves)
     * are networking based, so they poll sockets for data and run the
     * correct coroutine.
     */
    qemu_bh_schedule(s->completion_bh);

    while (io_uring_peek_cqe(&s->ring, &cqes) == 0) {
        LuringAIOCB *luringcb;
        int ret;

        if (!cqes) {
            break;
        }

        luringcb = io_uring_cqe_get_data(cqes);
        ret = cqes->res;
        io_uring_cqe_seen(&s->ring, cqes);
        cqes = NULL;

        /* Change counters one-by-one because we can be nested. */
        s->io_q.in_flight--;
        trace_luring_process_completion(s, luringcb, ret);

        /* total_read is non-zero only for resubmitted read requests */
        total_bytes = ret + luringcb->total_read;

        if (ret < 0) {
            /*
             * Only writev/readv/fsync requests on regular files or host block
             * devices are submitted. Therefore -EAGAIN is not expected but it's
             * known to happen sometimes with Linux SCSI. Submit again and hope
             * the request completes successfully.
             *
             * For more information, see:
             * https://lore.kernel.org/io-uring/20210727165811.284510-3-axboe@kernel.dk/T/#u
             *
             * If the code is changed to submit other types of requests in the
             * future, then this workaround may need to be extended to deal with
             * genuine -EAGAIN results that should not be resubmitted
             * immediately.
             */
            if (ret == -EINTR || ret == -EAGAIN) {
                luring_resubmit(s, luringcb);
                continue;
            }
        } else if (!luringcb->qiov) {
            goto end;
        } else if (total_bytes == luringcb->qiov->size) {
            ret = 0;
        /* Only read/write */
        } else {
            /* Short Read/Write */
            if (luringcb->is_read) {
                if (ret > 0) {
                    luring_resubmit_short_read(s, luringcb, ret);
                    continue;
                } else {
                    /* Pad with zeroes */
                    qemu_iovec_memset(luringcb->qiov, total_bytes, 0,
                                      luringcb->qiov->size - total_bytes);
                    ret = 0;
                }
            } else {
                ret = -ENOSPC;
            }
        }
end:
        luringcb->ret = ret;
        qemu_iovec_destroy(&luringcb->resubmit_qiov);

        /*
         * If the coroutine is already entered it must be in ioq_submit()
         * and will notice luringcb->ret has been filled in when it
         * eventually runs later. Coroutines cannot be entered recursively
         * so avoid doing that!
         */
        if (!qemu_coroutine_entered(luringcb->co)) {
            aio_co_wake(luringcb->co);
        }
    }
    qemu_bh_cancel(s->completion_bh);
}

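/**
 * ioq_submit:
 * @s: AIO state
 *
 * Move as many queued sqes as will fit into the ring and submit them with
 * io_uring_submit().  Sets io_q.blocked when requests remain queued because
 * the ring was full.  Returns the result of the last io_uring_submit() call,
 * or 0 if nothing was submitted.
 */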
static int ioq_submit(LuringState *s)
{
    int ret = 0;
    LuringAIOCB *luringcb, *luringcb_next;

    while (s->io_q.in_queue > 0) {
        /*
         * Try to fetch sqes from the ring for requests waiting in
         * the overflow queue
         */
        QSIMPLEQ_FOREACH_SAFE(luringcb, &s->io_q.submit_queue, next,
                              luringcb_next) {
            struct io_uring_sqe *sqes = io_uring_get_sqe(&s->ring);
            if (!sqes) {
                break;
            }
            /* Prep sqe for submission */
            *sqes = luringcb->sqeq;
            QSIMPLEQ_REMOVE_HEAD(&s->io_q.submit_queue, next);
        }
        ret = io_uring_submit(&s->ring);
        trace_luring_io_uring_submit(s, ret);
        /* Prevent infinite loop if submission is refused */
        if (ret <= 0) {
            if (ret == -EAGAIN || ret == -EINTR) {
                continue;
            }
            break;
        }
        s->io_q.in_flight += ret;
        s->io_q.in_queue  -= ret;
    }
    s->io_q.blocked = (s->io_q.in_queue > 0);

    if (s->io_q.in_flight) {
        /*
         * We can try to complete something just right away if there are
         * still requests in-flight.
         */
        luring_process_completions(s);
    }
    return ret;
}

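/*
 * Reap ready completions and then, unless submission is plugged, submit any
 * requests that are still queued.  Takes the AioContext lock around both
 * steps.
 */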
static void luring_process_completions_and_submit(LuringState *s)
{
    aio_context_acquire(s->aio_context);
    luring_process_completions(s);

    if (!s->io_q.plugged && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
    aio_context_release(s->aio_context);
}

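/*
 * Completion BH, scheduled by luring_process_completions() so that nested
 * event loops keep seeing pending completions.
 */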
static void qemu_luring_completion_bh(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

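/* Called when the io_uring ring fd becomes readable */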
static void qemu_luring_completion_cb(void *opaque)
{
    LuringState *s = opaque;
    luring_process_completions_and_submit(s);
}

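/* AioContext poll handler: report whether cqes are ready to be reaped */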
static bool qemu_luring_poll_cb(void *opaque)
{
    LuringState *s = opaque;

    return io_uring_cq_ready(&s->ring);
}

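/* Called by the event loop when qemu_luring_poll_cb() reports ready cqes */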
static void qemu_luring_poll_ready(void *opaque)
{
    LuringState *s = opaque;

    luring_process_completions_and_submit(s);
}

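/* Reset the submission queue to an empty, unplugged state */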
static void ioq_init(LuringQueue *io_q)
{
    QSIMPLEQ_INIT(&io_q->submit_queue);
    io_q->plugged = 0;
    io_q->in_queue = 0;
    io_q->in_flight = 0;
    io_q->blocked = false;
}

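/*
 * Start batching: defer ioq_submit() until the matching luring_io_unplug().
 * Plug/unplug pairs may nest.
 */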
void luring_io_plug(BlockDriverState *bs, LuringState *s)
{
    trace_luring_io_plug(s);
    s->io_q.plugged++;
}

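/* Stop batching and submit queued requests once the last plug is released */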
void luring_io_unplug(BlockDriverState *bs, LuringState *s)
{
    assert(s->io_q.plugged);
    trace_luring_io_unplug(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (--s->io_q.plugged == 0 &&
        !s->io_q.blocked && s->io_q.in_queue > 0) {
        ioq_submit(s);
    }
}

/**
 * luring_do_submit:
 * @fd: file descriptor for I/O
 * @luringcb: AIO control block
 * @s: AIO state
 * @offset: offset for request
 * @type: type of request
 *
 * Preps the sqe embedded in @luringcb and appends it to the pending queue.
 * The queue is submitted immediately unless submission is plugged and the
 * batch has not yet reached MAX_ENTRIES.
 */
static int luring_do_submit(int fd, LuringAIOCB *luringcb, LuringState *s,
                            uint64_t offset, int type)
{
    int ret;
    struct io_uring_sqe *sqes = &luringcb->sqeq;

    switch (type) {
    case QEMU_AIO_WRITE:
        io_uring_prep_writev(sqes, fd, luringcb->qiov->iov,
                             luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_READ:
        io_uring_prep_readv(sqes, fd, luringcb->qiov->iov,
                            luringcb->qiov->niov, offset);
        break;
    case QEMU_AIO_FLUSH:
        io_uring_prep_fsync(sqes, fd, IORING_FSYNC_DATASYNC);
        break;
    default:
        fprintf(stderr, "%s: invalid AIO request type, aborting 0x%x.\n",
                        __func__, type);
        abort();
    }
    io_uring_sqe_set_data(sqes, luringcb);

    QSIMPLEQ_INSERT_TAIL(&s->io_q.submit_queue, luringcb, next);
    s->io_q.in_queue++;
    trace_luring_do_submit(s, s->io_q.blocked, s->io_q.plugged,
                           s->io_q.in_queue, s->io_q.in_flight);
    if (!s->io_q.blocked &&
        (!s->io_q.plugged ||
         s->io_q.in_flight + s->io_q.in_queue >= MAX_ENTRIES)) {
        ret = ioq_submit(s);
        trace_luring_do_submit_done(s, ret);
        return ret;
    }
    return 0;
}

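/**
 * luring_co_submit:
 * @bs: block driver state (used for tracing only)
 * @s: AIO state
 * @fd: file descriptor for I/O
 * @offset: offset in bytes
 * @qiov: I/O vector, or NULL for flush requests
 * @type: QEMU_AIO_READ, QEMU_AIO_WRITE or QEMU_AIO_FLUSH
 *
 * Submit a request from coroutine context and yield until it completes.
 * Returns 0 on success or a negative errno value on failure.
 *
 * A minimal caller sketch (hypothetical; the surrounding variables are
 * assumptions, not part of this API):
 *
 *     ret = luring_co_submit(bs, s, fd, offset, &qiov, QEMU_AIO_READ);
 *     if (ret < 0) {
 *         return ret;   (propagate the I/O error)
 *     }
 */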
int coroutine_fn luring_co_submit(BlockDriverState *bs, LuringState *s, int fd,
                                  uint64_t offset, QEMUIOVector *qiov, int type)
{
    int ret;
    LuringAIOCB luringcb = {
        .co         = qemu_coroutine_self(),
        .ret        = -EINPROGRESS,
        .qiov       = qiov,
        .is_read    = (type == QEMU_AIO_READ),
    };
    trace_luring_co_submit(bs, s, &luringcb, fd, offset, qiov ? qiov->size : 0,
                           type);
    ret = luring_do_submit(fd, &luringcb, s, offset, type);

    if (ret < 0) {
        return ret;
    }

    if (luringcb.ret == -EINPROGRESS) {
        qemu_coroutine_yield();
    }
    return luringcb.ret;
}

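/* Stop monitoring the ring fd in @old_context and delete the completion BH */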
void luring_detach_aio_context(LuringState *s, AioContext *old_context)
{
    aio_set_fd_handler(old_context, s->ring.ring_fd, false,
                       NULL, NULL, NULL, NULL, s);
    qemu_bh_delete(s->completion_bh);
    s->aio_context = NULL;
}

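/*
 * Start processing completions in @new_context: create the completion BH and
 * register completion and poll handlers for the ring fd.
 */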
void luring_attach_aio_context(LuringState *s, AioContext *new_context)
{
    s->aio_context = new_context;
    s->completion_bh = aio_bh_new(new_context, qemu_luring_completion_bh, s);
    aio_set_fd_handler(s->aio_context, s->ring.ring_fd, false,
                       qemu_luring_completion_cb, NULL,
                       qemu_luring_poll_cb, qemu_luring_poll_ready, s);
}

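/**
 * luring_init:
 * @errp: error object to set on failure
 *
 * Create a LuringState backed by an io_uring instance of MAX_ENTRIES entries.
 * Returns NULL and sets @errp on failure.  Attach the state to an AioContext
 * with luring_attach_aio_context() before submitting requests.
 */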
LuringState *luring_init(Error **errp)
{
    int rc;
    LuringState *s = g_new0(LuringState, 1);
    struct io_uring *ring = &s->ring;

    trace_luring_init_state(s, sizeof(*s));

    rc = io_uring_queue_init(MAX_ENTRIES, ring, 0);
    if (rc < 0) {
        /* io_uring_queue_init() returns a negative errno on failure */
        error_setg_errno(errp, -rc, "failed to init linux io_uring ring");
        g_free(s);
        return NULL;
    }

    ioq_init(&s->io_q);
#ifdef CONFIG_LIBURING_REGISTER_RING_FD
    if (io_uring_register_ring_fd(&s->ring) < 0) {
        /*
         * Only warn about this error: we will fall back to the non-optimized
         * io_uring operations.
         */
        warn_report("failed to register linux io_uring ring file descriptor");
    }
#endif

    return s;
}

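/* Tear down the ring and free @s.  Detach from the AioContext first. */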
void luring_cleanup(LuringState *s)
{
    io_uring_queue_exit(&s->ring);
    trace_luring_cleanup_state(s);
    g_free(s);
}