qemu/include/block/aio.h
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#ifndef QEMU_AIO_H
#define QEMU_AIO_H

#include "qemu-common.h"
#include "qemu/queue.h"
#include "qemu/event_notifier.h"
#include "qemu/thread.h"
#include "qemu/timer.h"

typedef struct BlockAIOCB BlockAIOCB;
typedef void BlockCompletionFunc(void *opaque, int ret);

typedef struct AIOCBInfo {
    void (*cancel_async)(BlockAIOCB *acb);
    AioContext *(*get_aio_context)(BlockAIOCB *acb);
    size_t aiocb_size;
} AIOCBInfo;

struct BlockAIOCB {
    const AIOCBInfo *aiocb_info;
    BlockDriverState *bs;
    BlockCompletionFunc *cb;
    void *opaque;
    int refcnt;
};

void *qemu_aio_get(const AIOCBInfo *aiocb_info, BlockDriverState *bs,
                   BlockCompletionFunc *cb, void *opaque);
void qemu_aio_unref(void *p);
void qemu_aio_ref(void *p);

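/* Illustrative sketch (not part of this header): a driver typically embeds
 * BlockAIOCB as the first member of its own AIOCB type and describes it
 * with an AIOCBInfo; MyAIOCB, my_aiocb_info and my_state are hypothetical
 * names.
 *
 *     typedef struct MyAIOCB {
 *         BlockAIOCB common;        (must be the first member)
 *         int my_state;
 *     } MyAIOCB;
 *
 *     static const AIOCBInfo my_aiocb_info = {
 *         .aiocb_size = sizeof(MyAIOCB),
 *     };
 *
 *     MyAIOCB *acb = qemu_aio_get(&my_aiocb_info, bs, cb, opaque);
 *
 * When the request completes, the driver invokes the callback and drops
 * its reference:
 *
 *     acb->common.cb(acb->common.opaque, 0);
 *     qemu_aio_unref(acb);
 */
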
typedef struct AioHandler AioHandler;
typedef void QEMUBHFunc(void *opaque);
typedef void IOHandler(void *opaque);

struct ThreadPool;
struct LinuxAioState;

struct AioContext {
    GSource source;

    /* Protects all fields from multi-threaded access */
    QemuRecMutex lock;

    /* The list of registered AIO handlers */
    QLIST_HEAD(, AioHandler) aio_handlers;

    /* This is a simple lock used to protect the aio_handlers list.
     * Specifically, it's used to ensure that no callbacks are removed while
     * we're walking and dispatching callbacks.
     */
    int walking_handlers;

    /* Used to avoid unnecessary event_notifier_set calls in aio_notify;
     * accessed with atomic primitives.  If this field is 0, everything
     * (file descriptors, bottom halves, timers) will be re-evaluated
     * before the next blocking poll(), thus the event_notifier_set call
     * can be skipped.  If it is non-zero, you may need to wake up a
     * concurrent aio_poll or the glib main event loop, making
     * event_notifier_set necessary.
     *
     * Bit 0 is reserved for GSource usage of the AioContext, and is 1
     * between a call to aio_ctx_prepare and the next call to aio_ctx_check.
     * Bits 1-31 simply count the number of active calls to aio_poll
     * that are in the prepare or poll phase.
     *
     * The GSource and aio_poll must use a different mechanism because
     * there is no certainty that a call to GSource's prepare callback
     * (via g_main_context_prepare) is indeed followed by check and
     * dispatch.  It's not clear whether this would be a bug, but let's
     * play safe and allow it---it will just cause extra calls to
     * event_notifier_set until the next call to dispatch.
     *
     * Instead, the aio_poll calls include both the prepare and the
     * dispatch phase, hence a simple counter is enough for them.
     */
    uint32_t notify_me;
    /* Lock protecting concurrent adders and deleters of bottom halves */
    QemuMutex bh_lock;

    /* Anchor of the list of Bottom Halves belonging to the context */
    struct QEMUBH *first_bh;

    /* A simple lock used to protect the first_bh list, and ensure that
     * no callbacks are removed while we're walking and dispatching callbacks.
     */
    int walking_bh;

    /* Used by aio_notify.
     *
     * "notified" is used to avoid expensive event_notifier_test_and_clear
     * calls.  When it is clear, the EventNotifier is clear, or one thread
     * is going to clear "notified" before processing more events.  False
     * positives are possible, i.e. "notified" could be set even though the
     * EventNotifier is clear.
     *
     * Note that event_notifier_set *cannot* be optimized the same way.  For
     * more information on the problem that would result, see "#ifdef BUG2"
     * in the docs/aio_notify_accept.promela formal model.
     */
    bool notified;
    EventNotifier notifier;

    /* Thread pool for performing work and receiving completion callbacks */
    struct ThreadPool *thread_pool;

#ifdef CONFIG_LINUX_AIO
    /* State for native Linux AIO.  Uses aio_context_acquire/release for
     * locking.
     */
    struct LinuxAioState *linux_aio;
#endif

    /* TimerLists for calling timers - one per clock type */
    QEMUTimerListGroup tlg;

    int external_disable_cnt;

    /* epoll(7) state used when built with CONFIG_EPOLL */
    int epollfd;
    bool epoll_enabled;
    bool epoll_available;
};

/**
 * aio_context_new: Allocate a new AioContext.
 *
 * An AioContext provides a mini event loop that can be waited on
 * synchronously.  It also provides bottom halves, a service to execute
 * a piece of code as soon as possible.
 */
AioContext *aio_context_new(Error **errp);

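/* A minimal sketch of creating and releasing a context; error handling is
 * up to the caller, and local_err is just an illustrative name.
 *
 *     Error *local_err = NULL;
 *     AioContext *ctx = aio_context_new(&local_err);
 *     if (!ctx) {
 *         ... report local_err ...
 *     }
 *     ...
 *     aio_context_unref(ctx);   (drops the reference from aio_context_new)
 */
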
/**
 * aio_context_ref:
 * @ctx: The AioContext to operate on.
 *
 * Add a reference to an AioContext.
 */
void aio_context_ref(AioContext *ctx);

/**
 * aio_context_unref:
 * @ctx: The AioContext to operate on.
 *
 * Drop a reference to an AioContext.
 */
void aio_context_unref(AioContext *ctx);

/* Take ownership of the AioContext.  If the AioContext will be shared between
 * threads, and a thread does not want to be interrupted, it will have to
 * take ownership around calls to aio_poll().  Otherwise, aio_poll()
 * automatically takes care of calling aio_context_acquire and
 * aio_context_release.
 *
 * Access to timers and BHs from a thread that has not acquired AioContext
 * is possible.  Access to callbacks for now must be done while the AioContext
 * is owned by the thread (FIXME).
 */
void aio_context_acquire(AioContext *ctx);

/* Relinquish ownership of the AioContext. */
void aio_context_release(AioContext *ctx);

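/* Typical usage, assuming @ctx may be shared with another thread:
 *
 *     aio_context_acquire(ctx);
 *     ... poll or manipulate callbacks owned by ctx ...
 *     aio_context_release(ctx);
 */
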
/**
 * aio_bh_schedule_oneshot: Allocate a new bottom half structure that will run
 * only once and as soon as possible.
 */
void aio_bh_schedule_oneshot(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

/**
 * aio_bh_new: Allocate a new bottom half structure.
 *
 * Bottom halves are lightweight callbacks whose invocation is guaranteed
 * to be wait-free, thread-safe and signal-safe.  The #QEMUBH structure
 * is opaque and must be allocated prior to its use.
 */
QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque);

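/* A sketch of the bottom half life cycle; my_bh_cb and my_state are
 * hypothetical names.
 *
 *     static void my_bh_cb(void *opaque)
 *     {
 *         ... runs in the thread that polls ctx ...
 *     }
 *
 *     QEMUBH *bh = aio_bh_new(ctx, my_bh_cb, my_state);
 *     qemu_bh_schedule(bh);     (my_bh_cb will run as soon as possible)
 *     ...
 *     qemu_bh_delete(bh);       (cancel if pending, then free)
 */
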
/**
 * aio_notify: Force processing of pending events.
 *
 * Similar to signaling a condition variable, aio_notify forces
 * aio_poll to exit, so that the next call will re-examine pending events.
 * The caller of aio_notify will usually call aio_poll again very soon,
 * or go through another iteration of the GLib main loop.  Hence, aio_notify
 * also has the side effect of recalculating the sets of file descriptors
 * that the main loop waits for.
 *
 * Calling aio_notify is rarely necessary, because for example scheduling
 * a bottom half calls it already.
 */
void aio_notify(AioContext *ctx);

/**
 * aio_notify_accept: Acknowledge receiving an aio_notify.
 *
 * aio_notify() uses an EventNotifier in order to wake up a sleeping
 * aio_poll() or g_main_context_iteration().  Calls to aio_notify() are
 * usually rare, but the AioContext has to clear the EventNotifier on
 * every aio_poll() or g_main_context_iteration() in order to avoid
 * busy waiting.  This event_notifier_test_and_clear() cannot be done
 * using the usual aio_set_event_notifier(), because it must
 * be done before processing all events (file descriptors, bottom halves,
 * timers).
 *
 * aio_notify_accept() is an optimized event_notifier_test_and_clear()
 * that is specific to an AioContext's notifier; it is used internally
 * to clear the EventNotifier only if aio_notify() had been called.
 */
void aio_notify_accept(AioContext *ctx);

/**
 * aio_bh_call: Executes callback function of the specified BH.
 */
void aio_bh_call(QEMUBH *bh);

/**
 * aio_bh_poll: Poll bottom halves for an AioContext.
 *
 * These are internal functions used by the QEMU main loop.
 * Note that aio_bh_poll() cannot be called concurrently from
 * multiple threads.
 */
int aio_bh_poll(AioContext *ctx);

/**
 * qemu_bh_schedule: Schedule a bottom half.
 *
 * Scheduling a bottom half interrupts the main loop and causes the
 * execution of the callback that was passed to qemu_bh_new.
 *
 * Bottom halves that are scheduled from a bottom half handler are instantly
 * invoked.  This can create an infinite loop if a bottom half handler
 * schedules itself.
 *
 * @bh: The bottom half to be scheduled.
 */
void qemu_bh_schedule(QEMUBH *bh);

/**
 * qemu_bh_cancel: Cancel execution of a bottom half.
 *
 * Canceling execution of a bottom half undoes the effect of calls to
 * qemu_bh_schedule without freeing its resources yet.  While cancellation
 * itself is also wait-free and thread-safe, it can of course race with the
 * loop that executes bottom halves unless you are holding the iothread
 * mutex.  This makes it mostly useless if you are not holding the mutex.
 *
 * @bh: The bottom half to be canceled.
 */
void qemu_bh_cancel(QEMUBH *bh);

/**
 * qemu_bh_delete: Cancel execution of a bottom half and free its resources.
 *
 * Deleting a bottom half frees the memory that was allocated for it by
 * qemu_bh_new.  It also implies canceling the bottom half if it was
 * scheduled.
 * This function is asynchronous: the bottom half is only freed once the
 * event loop has finished with it.
 *
 * @bh: The bottom half to be deleted.
 */
void qemu_bh_delete(QEMUBH *bh);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, before g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_prepare(AioContext *ctx);

/* Return whether there are any pending callbacks from the GSource
 * attached to the AioContext, after g_poll is invoked.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_pending(AioContext *ctx);

/* Dispatch any pending callbacks from the GSource attached to the AioContext.
 *
 * This is used internally in the implementation of the GSource.
 */
bool aio_dispatch(AioContext *ctx);

/* Make progress in completing AIO work.  This can issue new pending
 * AIO operations as a result of executing I/O completion or bottom
 * half callbacks.
 *
 * Return whether any progress was made by executing AIO or bottom half
 * handlers.  If @blocking == true, this should always be true except
 * if someone called aio_notify.
 *
 * If there are no pending bottom halves, but there are pending AIO
 * operations, it may not be possible to make any progress without
 * blocking.  If @blocking is true, this function will wait until one
 * or more AIO events have completed, to ensure something has moved
 * before returning.
 */
bool aio_poll(AioContext *ctx, bool blocking);

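/* A common pattern is to drive the event loop by hand until a completion
 * callback sets a flag; "done" is a hypothetical variable updated by the
 * callback.
 *
 *     while (!done) {
 *         aio_poll(ctx, true);   (blocks until progress can be made)
 *     }
 */
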
/* Register a file descriptor and associated callbacks.  Behaves very similarly
 * to qemu_set_fd_handler.  Unlike qemu_set_fd_handler, these callbacks will
 * be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of qemu_set_fd_handler[2].
 */
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        void *opaque);

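/* A sketch of registering and unregistering a read handler; my_read_cb and
 * my_state are hypothetical, and is_external is true here because the fd is
 * assumed to carry guest-initiated I/O.
 *
 *     aio_set_fd_handler(ctx, fd, true, my_read_cb, NULL, my_state);
 *     ...
 *     aio_set_fd_handler(ctx, fd, true, NULL, NULL, NULL);   (unregister)
 */
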
/* Register an event notifier and associated callbacks.  Behaves very similarly
 * to event_notifier_set_handler.  Unlike event_notifier_set_handler, these
 * callbacks will be invoked when using aio_poll().
 *
 * Code that invokes AIO completion functions should rely on this function
 * instead of event_notifier_set_handler.
 */
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read);

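/* A sketch of wiring up an EventNotifier; my_notifier_cb is a hypothetical
 * handler.
 *
 *     EventNotifier e;
 *     event_notifier_init(&e, 0);
 *     aio_set_event_notifier(ctx, &e, false, my_notifier_cb);
 *     ...
 *     aio_set_event_notifier(ctx, &e, false, NULL);   (unregister)
 *     event_notifier_cleanup(&e);
 */
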
/* Return a GSource that lets the main loop poll the file descriptors attached
 * to this AioContext.
 */
GSource *aio_get_g_source(AioContext *ctx);

/* Return the ThreadPool bound to this AioContext */
struct ThreadPool *aio_get_thread_pool(AioContext *ctx);

/* Return the LinuxAioState bound to this AioContext */
struct LinuxAioState *aio_get_linux_aio(AioContext *ctx);

/**
 * aio_timer_new:
 * @ctx: the aio context
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Allocate a new timer attached to the context @ctx.
 * The function is responsible for memory allocation.
 *
 * The preferred interface is aio_timer_init. Use that
 * unless you really need dynamic memory allocation.
 *
 * Returns: a pointer to the new timer
 */
static inline QEMUTimer *aio_timer_new(AioContext *ctx, QEMUClockType type,
                                       int scale,
                                       QEMUTimerCB *cb, void *opaque)
{
    return timer_new_tl(ctx->tlg.tl[type], scale, cb, opaque);
}

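/* A sketch of a dynamically allocated timer; my_timer_cb and my_state are
 * hypothetical names.
 *
 *     QEMUTimer *t = aio_timer_new(ctx, QEMU_CLOCK_REALTIME, SCALE_MS,
 *                                  my_timer_cb, my_state);
 *     timer_mod(t, qemu_clock_get_ms(QEMU_CLOCK_REALTIME) + 100);
 *     ...
 *     timer_del(t);
 *     timer_free(t);
 */
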
/**
 * aio_timer_init:
 * @ctx: the aio context
 * @ts: the timer
 * @type: the clock type
 * @scale: the scale
 * @cb: the callback to call on timer expiry
 * @opaque: the opaque pointer to pass to the callback
 *
 * Initialise a new timer attached to the context @ctx.
 * The caller is responsible for memory allocation.
 */
static inline void aio_timer_init(AioContext *ctx,
                                  QEMUTimer *ts, QEMUClockType type,
                                  int scale,
                                  QEMUTimerCB *cb, void *opaque)
{
    timer_init_tl(ts, ctx->tlg.tl[type], scale, cb, opaque);
}

/**
 * aio_compute_timeout:
 * @ctx: the aio context
 *
 * Compute the timeout that a blocking aio_poll should use.
 */
int64_t aio_compute_timeout(AioContext *ctx);

/**
 * aio_disable_external:
 * @ctx: the aio context
 *
 * Disable the further processing of external clients.
 */
static inline void aio_disable_external(AioContext *ctx)
{
    atomic_inc(&ctx->external_disable_cnt);
}

/**
 * aio_enable_external:
 * @ctx: the aio context
 *
 * Enable the processing of external clients.
 */
static inline void aio_enable_external(AioContext *ctx)
{
    assert(ctx->external_disable_cnt > 0);
    atomic_dec(&ctx->external_disable_cnt);
}

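/* The calls nest, so a typical caller brackets a quiesced section:
 *
 *     aio_disable_external(ctx);
 *     ... process only internal (non-guest) requests ...
 *     aio_enable_external(ctx);
 */
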
/**
 * aio_external_disabled:
 * @ctx: the aio context
 *
 * Return true if the external clients are disabled.
 */
static inline bool aio_external_disabled(AioContext *ctx)
{
    return atomic_read(&ctx->external_disable_cnt);
}

/**
 * aio_node_check:
 * @ctx: the aio context
 * @is_external: Whether or not the checked node is an external event source.
 *
 * Check if the node's is_external flag is okay to be polled by the ctx at this
 * moment. True means green light.
 */
static inline bool aio_node_check(AioContext *ctx, bool is_external)
{
    return !is_external || !atomic_read(&ctx->external_disable_cnt);
}

/**
 * Return the AioContext whose event loop runs in the current thread.
 *
 * If called from an IOThread this will be the IOThread's AioContext.  If
 * called from another thread it will be the main loop AioContext.
 */
AioContext *qemu_get_current_aio_context(void);

/**
 * @ctx: the aio context
 *
 * Return whether we are running in the I/O thread that manages @ctx.
 */
static inline bool aio_context_in_iothread(AioContext *ctx)
{
    return ctx == qemu_get_current_aio_context();
}

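/* A common use is to run a callback in @ctx's thread: directly if we are
 * already there, and through a one-shot bottom half otherwise; cb and
 * opaque are hypothetical.
 *
 *     if (aio_context_in_iothread(ctx)) {
 *         cb(opaque);
 *     } else {
 *         aio_bh_schedule_oneshot(ctx, cb, opaque);
 *     }
 */
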
/**
 * aio_context_setup:
 * @ctx: the aio context
 *
 * Initialize the aio context.
 */
void aio_context_setup(AioContext *ctx);

#endif