qemu/include/block/aio.h
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef bool AioPollFn(void *opaque);
typedef void IOHandler(void *opaque);

struct Coroutine;
struct ThreadPool;
struct LinuxAioState;

struct AioContext {
    GSource source;

    /* Used by AioContext users to protect from multi-threaded access.  */
    QemuRecMutex lock;

    /* The list of registered AIO handlers.  Protected by ctx->list_lock. */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;

    /* A lock that protects concurrent QEMUBH and AioHandler adders and
     * deleters, and ensures that no callbacks are removed while we're
     * walking and dispatching them.
     */
    QemuLockCnt list_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    QSLIST_HEAD(, Coroutine) scheduled_coroutines;
    QEMUBH *co_schedule_bh;

    /* Thread pool for performing work and receiving completion callbacks.
     * Has its own locking.
     */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type.  Has its own
     * locking.
     */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* Number of AioHandlers without .io_poll() */
    int poll_disable_cnt;

    /* Polling mode parameters */
    int64_t poll_ns;        /* current polling time in nanoseconds */
    int64_t poll_max_ns;    /* maximum polling time in nanoseconds */
    int64_t poll_grow;      /* polling time growth factor */
    int64_t poll_shrink;    /* polling time shrink factor */

    /* Are we in polling mode or monitoring file descriptors? */
    bool poll_started;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * AioContext provides a mini event loop that can be waited on synchronously.
 * It also provides bottom halves, a service to execute a piece of code
 * as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Note that this is separate from bdrv_drained_begin/bdrv_drained_end.  A
 * thread still has to call those to avoid being interrupted by the guest.
 *
 * Bottom halves, timers and callbacks can be created or removed without
 * acquiring the AioContext.
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);
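
/* Illustrative sketch (not part of the original header): a thread that
 * shares an AioContext with an IOThread takes ownership around a series
 * of operations on state bound to that context.  "ctx" is a hypothetical
 * AioContext pointer obtained elsewhere.
 *
 *     aio_context_acquire(ctx);
 *     ... operate on devices or requests bound to ctx ...
 *     aio_context_release(ctx);
 */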

/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);
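
/* Illustrative sketch (an assumption, not from the original header): fire a
 * callback once in the context's home thread.  my_cb and my_state are
 * hypothetical names.
 *
 *     static void my_cb(void *opaque)
 *     {
 *         struct MyState *s = opaque;
 *         ... runs once, in ctx's event loop ...
 *     }
 *
 *     aio_bh_schedule_oneshot(ctx, my_cb, my_state);
 */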

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_context_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Execute the callback function of the specified bottom half.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * This is an internal function used by the QEMU main loop.
 * Note that aio_bh_poll() must not be called concurrently with itself.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * Deletion is asynchronous: the bottom half is only marked for deletion
 * here, and its memory is released later, at the end of bottom half
 * dispatch.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);
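
/* Illustrative lifecycle sketch (an assumption, not from the original
 * header); my_bh_cb is a hypothetical QEMUBHFunc:
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *
 *     qemu_bh_schedule(bh);   // runs my_bh_cb soon, in ctx's event loop
 *     qemu_bh_cancel(bh);     // undo a pending schedule, keep the BH
 *     qemu_bh_delete(bh);     // cancel and mark for deletion
 */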

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
void aio_dispatch(AioContext *ctx);

/* Make progress in completing pending AIO work.  This can issue new AIO
 * requests as a result of executing I/O completion or bottom half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);
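
/* Illustrative sketch (an assumption, not from the original header): drain
 * all currently pending work on a context, then block for the next event.
 *
 *     while (aio_poll(ctx, false)) {
 *         // non-blocking: keep going while progress is being made
 *     }
 *     aio_poll(ctx, true);    // block until at least one event completes
 */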

/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        void *opaque);
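
/* Illustrative sketch (an assumption, not from the original header): watch a
 * socket for readability; sock_read_cb is a hypothetical IOHandler.
 *
 *     aio_set_fd_handler(ctx, sockfd, false,
 *                        sock_read_cb,   // io_read
 *                        NULL,           // io_write: not interested
 *                        NULL,           // io_poll: no polling callback
 *                        my_state);
 *
 * Passing NULL for all callbacks unregisters the file descriptor.
 */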

/* Set polling begin/end callbacks for a file descriptor that has already been
 * registered with aio_set_fd_handler.  Do nothing if the file descriptor is
 * not registered.
 */
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end);

/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these callbacks
 * will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll);
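
/* Illustrative sketch (an assumption, not from the original header):
 * notifier_read_cb is a hypothetical EventNotifierHandler that calls
 * event_notifier_test_and_clear() before doing its work.
 *
 *     EventNotifier my_notifier;
 *
 *     event_notifier_init(&my_notifier, 0);
 *     aio_set_event_notifier(ctx, &my_notifier, false,
 *                            notifier_read_cb, NULL);
 *
 * Another thread can then wake the handler with
 * event_notifier_set(&my_notifier).
 */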

/* Set polling begin/end callbacks for an event notifier that has already been
 * registered with aio_set_event_notifier.  Do nothing if the event notifier is
 * not registered.
 */
void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end);

/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Set up the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_setup_linux_aio(AioContext *ctx, Error **errp);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init. Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}
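
/* Illustrative sketch (an assumption, not from the original header): a
 * caller-allocated one-shot timer; my_timer_cb is a hypothetical QEMUTimerCB,
 * and timer_mod()/qemu_clock_get_ms() come from qemu/timer.h.
 *
 *     QEMUTimer my_timer;
 *
 *     aio_timer_init(ctx, &my_timer, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                    my_timer_cb, my_state);
 *     timer_mod(&my_timer, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 */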

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    int old;

    old = atomic_fetch_dec(&ctx->external_disable_cnt);
    assert(old > 0);
    if (old == 1) {
        /* Kick event loop so it re-arms file descriptors */
        aio_notify(ctx);
    }
}
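
/* Illustrative sketch (an assumption, not from the original header): the
 * disable count nests, so each aio_disable_external() must be paired with
 * exactly one aio_enable_external().
 *
 *     aio_disable_external(ctx);
 *     ... external (guest-driven) handlers are not polled here ...
 *     aio_enable_external(ctx);
 */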

/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check whether a node with the given is_external flag may be polled by @ctx
 * at this moment.  Returns true if polling it is allowed.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_co_schedule:
 * @ctx: the aio context
 * @co: the coroutine
 *
 * Start a coroutine on a remote AioContext.
 *
 * The coroutine must not be entered by anyone else while aio_co_schedule()
 * is active.  In addition the coroutine must have yielded unless ctx
 * is the context in which the coroutine is running (i.e. the value of
 * qemu_get_current_aio_context() from the coroutine itself).
 */
void aio_co_schedule(AioContext *ctx, struct Coroutine *co);
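
/* Illustrative sketch (an assumption, not from the original header): create
 * a coroutine and hand it to another context's event loop.
 * qemu_coroutine_create() comes from qemu/coroutine.h; my_co_fn is a
 * hypothetical CoroutineEntry.
 *
 *     Coroutine *co = qemu_coroutine_create(my_co_fn, my_state);
 *
 *     aio_co_schedule(iothread_ctx, co);  // runs in iothread_ctx's loop
 */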

/**
 * aio_co_wake:
 * @co: the coroutine
 *
 * Restart a coroutine on the AioContext where it was running last, thus
 * preventing coroutines from jumping from one context to another when they
 * go to sleep.
 *
 * aio_co_wake may be executed either in coroutine or non-coroutine
 * context.  The coroutine must not be entered by anyone else while
 * aio_co_wake() is active.
 */
void aio_co_wake(struct Coroutine *co);

/**
 * aio_co_enter:
 * @ctx: the context to run the coroutine
 * @co: the coroutine to run
 *
 * Enter a coroutine in the specified AioContext.
 */
void aio_co_enter(AioContext *ctx, struct Coroutine *co);

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * in_aio_context_home_thread:
 * @ctx: the aio context
 *
 * Return whether we are running in the thread that normally runs @ctx.  Note
 * that acquiring/releasing ctx does not affect the outcome, each AioContext
 * still only has one home thread that is responsible for running it.
 */
static inline bool in_aio_context_home_thread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}
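
/* Illustrative sketch (an assumption, not from the original header): guard
 * code that must only run in the context's home thread.
 *
 *     assert(in_aio_context_home_thread(ctx));
 */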

/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

/**
 * aio_context_destroy:
 * @ctx: the aio context
 *
 * Destroy the aio context.
 */
void aio_context_destroy(AioContext *ctx);

/**
 * aio_context_set_poll_params:
 * @ctx: the aio context
 * @max_ns: how long to busy poll for, in nanoseconds
 * @grow: polling time growth factor
 * @shrink: polling time shrink factor
 *
 * Poll mode can be disabled by setting poll_max_ns to 0.
 */
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink,
                                 Error **errp);
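
/* Illustrative sketch (an assumption, not from the original header): allow
 * up to 32 microseconds of busy polling, doubling or halving the polling
 * window as the adaptive algorithm sees fit; &error_abort comes from
 * qapi/error.h.
 *
 *     aio_context_set_poll_params(ctx, 32 * 1000, 2, 2, &error_abort);
 */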

#endif