qemu/util/aio-posix.c
/*
 * QEMU aio implementation
 *
 * Copyright IBM, Corp. 2008
 *
 * Authors:
 *  Anthony Liguori   <aliguori@us.ibm.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 * Contributions after 2012-01-13 are licensed under the terms of the
 * GNU GPL, version 2 or (at your option) any later version.
 */

#include "qemu/osdep.h"
#include "block/block.h"
#include "qemu/main-loop.h"
#include "qemu/rcu.h"
#include "qemu/rcu_queue.h"
#include "qemu/sockets.h"
#include "qemu/cutils.h"
#include "trace.h"
#include "aio-posix.h"

/* Stop userspace polling on a handler if it isn't active for some time */
#define POLL_IDLE_INTERVAL_NS (7 * NANOSECONDS_PER_SECOND)

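/*
 * poll_disable_cnt is raised by aio_set_fd_handler() for every registered
 * handler that lacks an io_poll() callback, so a single non-pollable fd
 * disables userspace polling for the whole context.  fdmon implementations
 * with no wait-avoidance logic of their own install this function as their
 * ->need_wait() callback (see fdmon_supports_polling() below).
 */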
bool aio_poll_disabled(AioContext *ctx)
{
    return qatomic_read(&ctx->poll_disable_cnt);
}

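/*
 * Ready handlers are queued on a ready_list by the fdmon ->wait()
 * implementations and by the userspace polling code, then dispatched in one
 * batch by aio_dispatch_ready_handlers().  A handler may already sit on the
 * ready_list of a nested aio_poll() invocation, hence the
 * QLIST_SAFE_REMOVE() before re-inserting it.
 */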
void aio_add_ready_handler(AioHandlerList *ready_list,
                           AioHandler *node,
                           int revents)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->pfd.revents = revents;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

static void aio_add_poll_ready_handler(AioHandlerList *ready_list,
                                       AioHandler *node)
{
    QLIST_SAFE_REMOVE(node, node_ready); /* remove from nested parent's list */
    node->poll_ready = true;
    QLIST_INSERT_HEAD(ready_list, node, node_ready);
}

static AioHandler *find_aio_handler(AioContext *ctx, int fd)
{
    AioHandler *node;

    QLIST_FOREACH(node, &ctx->aio_handlers, node) {
        if (node->pfd.fd == fd) {
            if (!QLIST_IS_INSERTED(node, node_deleted)) {
                return node;
            }
        }
    }

    return NULL;
}

static bool aio_remove_fd_handler(AioContext *ctx, AioHandler *node)
{
    /* If the GSource is in the process of being destroyed then
     * g_source_remove_poll() causes an assertion failure.  Skip
     * removal in that case, because glib cleans up its state during
     * destruction anyway.
     */
    if (!g_source_is_destroyed(&ctx->source)) {
        g_source_remove_poll(&ctx->source, &node->pfd);
    }

    node->pfd.revents = 0;
    node->poll_ready = false;

    /* If the fd monitor has already marked it deleted, leave it alone */
    if (QLIST_IS_INSERTED(node, node_deleted)) {
        return false;
    }

    /* If a read is in progress, just mark the node as deleted */
    if (qemu_lockcnt_count(&ctx->list_lock)) {
        QLIST_INSERT_HEAD_RCU(&ctx->deleted_aio_handlers, node, node_deleted);
        return false;
    }
    /* Otherwise, delete it for real.  We can't just mark it as
     * deleted because deleted nodes are only cleaned up while
     * no one is walking the handlers list.
     */
    QLIST_SAFE_REMOVE(node, node_poll);
    QLIST_REMOVE(node, node);
    return true;
}

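/*
 * Register or update the handlers for @fd.  Passing NULL for io_read,
 * io_write and io_poll unregisters the fd; io_poll is ignored unless an
 * io_poll_ready callback is also given.  A hypothetical caller watching a
 * socket for readability might do (my_read_cb and my_state are placeholders,
 * not QEMU symbols):
 *
 *     aio_set_fd_handler(ctx, sockfd, true, my_read_cb, NULL, NULL, NULL,
 *                        my_state);
 *
 * The new node is published with an RCU list insertion so that concurrent
 * walkers see a consistent handler; the old node, if any, is freed here or
 * deferred to aio_free_deleted_handlers().
 */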
void aio_set_fd_handler(AioContext *ctx,
                        int fd,
                        bool is_external,
                        IOHandler *io_read,
                        IOHandler *io_write,
                        AioPollFn *io_poll,
                        IOHandler *io_poll_ready,
                        void *opaque)
{
    AioHandler *node;
    AioHandler *new_node = NULL;
    bool is_new = false;
    bool deleted = false;
    int poll_disable_change;

    if (io_poll && !io_poll_ready) {
        io_poll = NULL; /* polling only makes sense if there is a handler */
    }

    qemu_lockcnt_lock(&ctx->list_lock);

    node = find_aio_handler(ctx, fd);

    /* Are we deleting the fd handler? */
    if (!io_read && !io_write && !io_poll) {
        if (node == NULL) {
            qemu_lockcnt_unlock(&ctx->list_lock);
            return;
        }
        /* Clean events in order to unregister fd from the ctx epoll. */
        node->pfd.events = 0;

        poll_disable_change = -!node->io_poll;
    } else {
        poll_disable_change = !io_poll - (node && !node->io_poll);
        if (node == NULL) {
            is_new = true;
        }
        /* Alloc and insert if it's not already there */
        new_node = g_new0(AioHandler, 1);

        /* Update handler with latest information */
        new_node->io_read = io_read;
        new_node->io_write = io_write;
        new_node->io_poll = io_poll;
        new_node->io_poll_ready = io_poll_ready;
        new_node->opaque = opaque;
        new_node->is_external = is_external;

        if (is_new) {
            new_node->pfd.fd = fd;
        } else {
            new_node->pfd = node->pfd;
        }
        g_source_add_poll(&ctx->source, &new_node->pfd);

        new_node->pfd.events = (io_read ? G_IO_IN | G_IO_HUP | G_IO_ERR : 0);
        new_node->pfd.events |= (io_write ? G_IO_OUT | G_IO_ERR : 0);

        QLIST_INSERT_HEAD_RCU(&ctx->aio_handlers, new_node, node);
    }

    /* No need to order poll_disable_cnt writes against other updates;
     * the counter is only used to avoid wasting time and latency on
     * iterated polling when the system call will be ultimately necessary.
     * Changing handlers is a rare event, and a little wasted polling until
     * the aio_notify below is not an issue.
     */
    qatomic_set(&ctx->poll_disable_cnt,
                qatomic_read(&ctx->poll_disable_cnt) + poll_disable_change);

    ctx->fdmon_ops->update(ctx, node, new_node);
    if (node) {
        deleted = aio_remove_fd_handler(ctx, node);
    }
    qemu_lockcnt_unlock(&ctx->list_lock);
    aio_notify(ctx);

    if (deleted) {
        g_free(node);
    }
}

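/*
 * Install optional callbacks that bracket busy-poll periods for @fd:
 * io_poll_begin() runs when the context starts busy-waiting on the handler's
 * io_poll() callback and io_poll_end() runs when it stops (see
 * poll_set_started()).  Callers can use these to suppress event
 * notifications that are redundant while polling, and re-enable them
 * afterwards.
 */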
void aio_set_fd_poll(AioContext *ctx, int fd,
                     IOHandler *io_poll_begin,
                     IOHandler *io_poll_end)
{
    AioHandler *node = find_aio_handler(ctx, fd);

    if (!node) {
        return;
    }

    node->io_poll_begin = io_poll_begin;
    node->io_poll_end = io_poll_end;
}

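/*
 * EventNotifiers are plain eventfds (or pipe pairs on hosts without
 * eventfd), so these wrappers simply reuse the fd handler machinery on the
 * notifier's read end.
 */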
void aio_set_event_notifier(AioContext *ctx,
                            EventNotifier *notifier,
                            bool is_external,
                            EventNotifierHandler *io_read,
                            AioPollFn *io_poll,
                            EventNotifierHandler *io_poll_ready)
{
    aio_set_fd_handler(ctx, event_notifier_get_fd(notifier), is_external,
                       (IOHandler *)io_read, NULL, io_poll,
                       (IOHandler *)io_poll_ready, notifier);
}

void aio_set_event_notifier_poll(AioContext *ctx,
                                 EventNotifier *notifier,
                                 EventNotifierHandler *io_poll_begin,
                                 EventNotifierHandler *io_poll_end)
{
    aio_set_fd_poll(ctx, event_notifier_get_fd(notifier),
                    (IOHandler *)io_poll_begin,
                    (IOHandler *)io_poll_end);
}

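/*
 * Flip the context into or out of poll mode, invoking each pollable
 * handler's io_poll_begin()/io_poll_end() callback.  Returns true if leaving
 * poll mode caught a ready handler, which can happen when an event sneaks in
 * just as io_poll_end() re-enables the fd-based notification path.
 */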
static bool poll_set_started(AioContext *ctx, AioHandlerList *ready_list,
                             bool started)
{
    AioHandler *node;
    bool progress = false;

    if (started == ctx->poll_started) {
        return false;
    }

    ctx->poll_started = started;

    qemu_lockcnt_inc(&ctx->list_lock);
    QLIST_FOREACH(node, &ctx->poll_aio_handlers, node_poll) {
        IOHandler *fn;

        if (QLIST_IS_INSERTED(node, node_deleted)) {
            continue;
        }

        if (started) {
            fn = node->io_poll_begin;
        } else {
            fn = node->io_poll_end;
        }

        if (fn) {
            fn(node->opaque);
        }

        /* Poll one last time in case ->io_poll_end() raced with the event */
        if (!started && node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);
            progress = true;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return progress;
}


bool aio_prepare(AioContext *ctx)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);

    /* Poll mode cannot be used with glib's event loop, disable it. */
    poll_set_started(ctx, &ready_list, false);
    /* TODO what to do with this list? */

    return false;
}

bool aio_pending(AioContext *ctx)
{
    AioHandler *node;
    bool result = false;

    /*
     * We have to walk very carefully in case aio_set_fd_handler is
     * called while we're walking.
     */
    qemu_lockcnt_inc(&ctx->list_lock);

    QLIST_FOREACH_RCU(node, &ctx->aio_handlers, node) {
        int revents;

        /* TODO should this check poll ready? */
        revents = node->pfd.revents & node->pfd.events;
        if (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR) && node->io_read &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
        if (revents & (G_IO_OUT | G_IO_ERR) && node->io_write &&
            aio_node_check(ctx, node->is_external)) {
            result = true;
            break;
        }
    }
    qemu_lockcnt_dec(&ctx->list_lock);

    return result;
}

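/*
 * Reap nodes that aio_remove_fd_handler() could only mark as deleted because
 * the handler list was being walked at the time.  qemu_lockcnt_dec_if_lock()
 * succeeds only for the outermost walker; nested callers return early and
 * leave the freeing to their parent.
 */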
static void aio_free_deleted_handlers(AioContext *ctx)
{
    AioHandler *node;

    if (QLIST_EMPTY_RCU(&ctx->deleted_aio_handlers)) {
        return;
    }
    if (!qemu_lockcnt_dec_if_lock(&ctx->list_lock)) {
        return; /* we are nested, let the parent do the freeing */
    }

    while ((node = QLIST_FIRST_RCU(&ctx->deleted_aio_handlers))) {
        QLIST_REMOVE(node, node);
        QLIST_REMOVE(node, node_deleted);
        QLIST_SAFE_REMOVE(node, node_poll);
        g_free(node);
    }

    qemu_lockcnt_inc_and_unlock(&ctx->list_lock);
}

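/*
 * Run the callbacks for one handler.  A pending io_poll_ready() with no fd
 * revents short-circuits the rest; otherwise io_read() runs before
 * io_write().  Returns true if progress was made; the context's own
 * aio_notify() EventNotifier deliberately does not count as progress.
 */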
static bool aio_dispatch_handler(AioContext *ctx, AioHandler *node)
{
    bool progress = false;
    bool poll_ready;
    int revents;

    revents = node->pfd.revents & node->pfd.events;
    node->pfd.revents = 0;

    poll_ready = node->poll_ready;
    node->poll_ready = false;

    /*
     * Start polling AioHandlers when they become ready because activity is
     * likely to continue.  Note that starvation is theoretically possible when
     * fdmon_supports_polling(), but only until the fd fires for the first
     * time.
     */
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        !QLIST_IS_INSERTED(node, node_poll) &&
        node->io_poll) {
        trace_poll_add(ctx, node, node->pfd.fd, revents);
        if (ctx->poll_started && node->io_poll_begin) {
            node->io_poll_begin(node->opaque);
        }
        QLIST_INSERT_HEAD(&ctx->poll_aio_handlers, node, node_poll);
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        poll_ready && revents == 0 &&
        aio_node_check(ctx, node->is_external) &&
        node->io_poll_ready) {
        node->io_poll_ready(node->opaque);

        /*
         * Return early since revents was zero. aio_notify() does not count as
         * progress.
         */
        return node->opaque != &ctx->notifier;
    }

    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_IN | G_IO_HUP | G_IO_ERR)) &&
        aio_node_check(ctx, node->is_external) &&
        node->io_read) {
        node->io_read(node->opaque);

        /* aio_notify() does not count as progress */
        if (node->opaque != &ctx->notifier) {
            progress = true;
        }
    }
    if (!QLIST_IS_INSERTED(node, node_deleted) &&
        (revents & (G_IO_OUT | G_IO_ERR)) &&
        aio_node_check(ctx, node->is_external) &&
        node->io_write) {
        node->io_write(node->opaque);
        progress = true;
    }

    return progress;
}

/*
 * If we have a list of ready handlers then this is more efficient than
 * scanning all handlers with aio_dispatch_handlers().
 */
static bool aio_dispatch_ready_handlers(AioContext *ctx,
                                        AioHandlerList *ready_list)
{
    bool progress = false;
    AioHandler *node;

    while ((node = QLIST_FIRST(ready_list))) {
        QLIST_REMOVE(node, node_ready);
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

/* Slower than aio_dispatch_ready_handlers() but only used via glib */
static bool aio_dispatch_handlers(AioContext *ctx)
{
    AioHandler *node, *tmp;
    bool progress = false;

    QLIST_FOREACH_SAFE_RCU(node, &ctx->aio_handlers, node, tmp) {
        progress = aio_dispatch_handler(ctx, node) || progress;
    }

    return progress;
}

void aio_dispatch(AioContext *ctx)
{
    qemu_lockcnt_inc(&ctx->list_lock);
    aio_bh_poll(ctx);
    aio_dispatch_handlers(ctx);
    aio_free_deleted_handlers(ctx);
    qemu_lockcnt_dec(&ctx->list_lock);

    timerlistgroup_run_timers(&ctx->tlg);
}

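/*
 * Make one pass over the pollable handlers and queue those whose io_poll()
 * reports readiness.  A hit refreshes the handler's idle deadline and zeroes
 * *timeout so the caller returns to the event loop without blocking.
 */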
static bool run_poll_handlers_once(AioContext *ctx,
                                   AioHandlerList *ready_list,
                                   int64_t now,
                                   int64_t *timeout)
{
    bool progress = false;
    AioHandler *node;
    AioHandler *tmp;

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (aio_node_check(ctx, node->is_external) &&
            node->io_poll(node->opaque)) {
            aio_add_poll_ready_handler(ready_list, node);

            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;

            /*
             * Polling was successful, exit try_poll_mode immediately
             * to adjust the next polling time.
             */
            *timeout = 0;
            if (node->opaque != &ctx->notifier) {
                progress = true;
            }
        }

        /* Caller handles freeing deleted nodes.  Don't do it here. */
    }

    return progress;
}

static bool fdmon_supports_polling(AioContext *ctx)
{
    return ctx->fdmon_ops->need_wait != aio_poll_disabled;
}

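/*
 * Drop handlers that have been idle for POLL_IDLE_INTERVAL_NS from the poll
 * set so a quiet fd stops burning CPU in the busy loop.  A handler re-enters
 * the poll set via aio_dispatch_handler() the next time its fd fires.
 */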
static bool remove_idle_poll_handlers(AioContext *ctx,
                                      AioHandlerList *ready_list,
                                      int64_t now)
{
    AioHandler *node;
    AioHandler *tmp;
    bool progress = false;

    /*
     * File descriptor monitoring implementations without userspace polling
     * support suffer from starvation when a subset of handlers is polled
     * because fds will not be processed in a timely fashion.  Don't remove
     * idle poll handlers.
     */
    if (!fdmon_supports_polling(ctx)) {
        return false;
    }

    QLIST_FOREACH_SAFE(node, &ctx->poll_aio_handlers, node_poll, tmp) {
        if (node->poll_idle_timeout == 0LL) {
            node->poll_idle_timeout = now + POLL_IDLE_INTERVAL_NS;
        } else if (now >= node->poll_idle_timeout) {
            trace_poll_remove(ctx, node, node->pfd.fd);
            node->poll_idle_timeout = 0LL;
            QLIST_SAFE_REMOVE(node, node_poll);
            if (ctx->poll_started && node->io_poll_end) {
                node->io_poll_end(node->opaque);

                /*
                 * Final poll in case ->io_poll_end() races with an event.
                 * Never mind about re-adding the handler in the rare case
                 * where this causes progress.
                 */
                if (node->io_poll(node->opaque)) {
                    aio_add_poll_ready_handler(ready_list, node);
                    progress = true;
                }
            }
        }
    }

    return progress;
}

/* run_poll_handlers:
 * @ctx: the AioContext
 * @ready_list: the list to place ready handlers on
 * @max_ns: maximum time to poll for, in nanoseconds
 *
 * Polls for a given time.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool run_poll_handlers(AioContext *ctx, AioHandlerList *ready_list,
                              int64_t max_ns, int64_t *timeout)
{
    bool progress;
    int64_t start_time, elapsed_time;

    assert(qemu_lockcnt_count(&ctx->list_lock) > 0);

    trace_run_poll_handlers_begin(ctx, max_ns, *timeout);

    /*
     * Optimization: ->io_poll() handlers often contain RCU read critical
     * sections and we therefore see many rcu_read_lock() -> rcu_read_unlock()
     * -> rcu_read_lock() -> ... sequences with expensive memory
     * synchronization primitives.  Make the entire polling loop an RCU
     * critical section because nested rcu_read_lock()/rcu_read_unlock() calls
     * are cheap.
     */
    RCU_READ_LOCK_GUARD();

    start_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    do {
        progress = run_poll_handlers_once(ctx, ready_list,
                                          start_time, timeout);
        elapsed_time = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start_time;
        max_ns = qemu_soonest_timeout(*timeout, max_ns);
        assert(!(max_ns && progress));
    } while (elapsed_time < max_ns && !ctx->fdmon_ops->need_wait(ctx));

    if (remove_idle_poll_handlers(ctx, ready_list,
                                  start_time + elapsed_time)) {
        *timeout = 0;
        progress = true;
    }

    /* If time has passed with no successful polling, adjust *timeout to
     * keep the same ending time.
     */
    if (*timeout != -1) {
        *timeout -= MIN(*timeout, elapsed_time);
    }

    trace_run_poll_handlers_end(ctx, progress, *timeout);
    return progress;
}

/* try_poll_mode:
 * @ctx: the AioContext
 * @ready_list: list to add handlers that need to be run
 * @timeout: timeout for blocking wait, computed by the caller and updated if
 *    polling succeeds.
 *
 * Note that the caller must have incremented ctx->list_lock.
 *
 * Returns: true if progress was made, false otherwise
 */
static bool try_poll_mode(AioContext *ctx, AioHandlerList *ready_list,
                          int64_t *timeout)
{
    int64_t max_ns;

    if (QLIST_EMPTY_RCU(&ctx->poll_aio_handlers)) {
        return false;
    }

    max_ns = qemu_soonest_timeout(*timeout, ctx->poll_ns);
    if (max_ns && !ctx->fdmon_ops->need_wait(ctx)) {
        poll_set_started(ctx, ready_list, true);

        if (run_poll_handlers(ctx, ready_list, max_ns, timeout)) {
            return true;
        }
    }

    if (poll_set_started(ctx, ready_list, false)) {
        *timeout = 0;
        return true;
    }

    return false;
}

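/*
 * The event loop primitive: try userspace polling first, fall back to the
 * fdmon implementation's ->wait(), then dispatch bottom halves, ready fd
 * handlers, and expired timers.  The notify_me window around the wait lets
 * aio_notify() skip its expensive event_notifier_set() whenever no thread
 * can be blocked in ->wait().
 */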
bool aio_poll(AioContext *ctx, bool blocking)
{
    AioHandlerList ready_list = QLIST_HEAD_INITIALIZER(ready_list);
    bool progress;
    bool use_notify_me;
    int64_t timeout;
    int64_t start = 0;

    /*
     * There cannot be two concurrent aio_poll calls for the same AioContext (or
     * an aio_poll concurrent with a GSource prepare/check/dispatch callback).
     * We rely on this below to avoid slow locked accesses to ctx->notify_me.
     *
     * aio_poll() may only be called in the AioContext's thread. iohandler_ctx
     * is special in that it runs in the main thread, but that thread's context
     * is qemu_aio_context.
     */
    assert(in_aio_context_home_thread(ctx == iohandler_get_aio_context() ?
                                      qemu_get_aio_context() : ctx));

    qemu_lockcnt_inc(&ctx->list_lock);

    if (ctx->poll_max_ns) {
        start = qemu_clock_get_ns(QEMU_CLOCK_REALTIME);
    }

    timeout = blocking ? aio_compute_timeout(ctx) : 0;
    progress = try_poll_mode(ctx, &ready_list, &timeout);
    assert(!(timeout && progress));

    /*
     * aio_notify can avoid the expensive event_notifier_set if
     * everything (file descriptors, bottom halves, timers) will
     * be re-evaluated before the next blocking poll().  This is
     * already true when aio_poll is called with blocking == false;
     * if blocking == true, it is only true after poll() returns,
     * so disable the optimization now.
     */
    use_notify_me = timeout != 0;
    if (use_notify_me) {
        qatomic_set(&ctx->notify_me, qatomic_read(&ctx->notify_me) + 2);
        /*
         * Write ctx->notify_me before reading ctx->notified.  Pairs with
         * smp_mb in aio_notify().
         */
        smp_mb();

        /* Don't block if aio_notify() was called */
        if (qatomic_read(&ctx->notified)) {
            timeout = 0;
        }
    }

    /* If polling is allowed, non-blocking aio_poll does not need the
     * system call---a single round of run_poll_handlers_once suffices.
     */
    if (timeout || ctx->fdmon_ops->need_wait(ctx)) {
        ctx->fdmon_ops->wait(ctx, &ready_list, timeout);
    }

    if (use_notify_me) {
        /* Finish the poll before clearing the flag.  */
        qatomic_store_release(&ctx->notify_me,
                              qatomic_read(&ctx->notify_me) - 2);
    }

    aio_notify_accept(ctx);

    /* Adjust polling time */
    if (ctx->poll_max_ns) {
        int64_t block_ns = qemu_clock_get_ns(QEMU_CLOCK_REALTIME) - start;

        if (block_ns <= ctx->poll_ns) {
            /* This is the sweet spot, no adjustment needed */
        } else if (block_ns > ctx->poll_max_ns) {
            /* We'd have to poll for too long, poll less */
            int64_t old = ctx->poll_ns;

            if (ctx->poll_shrink) {
                ctx->poll_ns /= ctx->poll_shrink;
            } else {
                ctx->poll_ns = 0;
            }

            trace_poll_shrink(ctx, old, ctx->poll_ns);
        } else if (ctx->poll_ns < ctx->poll_max_ns &&
                   block_ns < ctx->poll_max_ns) {
            /* There is room to grow, poll longer */
            int64_t old = ctx->poll_ns;
            int64_t grow = ctx->poll_grow;

            if (grow == 0) {
                grow = 2;
            }

            if (ctx->poll_ns) {
                ctx->poll_ns *= grow;
            } else {
                ctx->poll_ns = 4000; /* start polling at 4 microseconds */
            }

            if (ctx->poll_ns > ctx->poll_max_ns) {
                ctx->poll_ns = ctx->poll_max_ns;
            }

            trace_poll_grow(ctx, old, ctx->poll_ns);
        }
    }

    progress |= aio_bh_poll(ctx);
    progress |= aio_dispatch_ready_handlers(ctx, &ready_list);

    aio_free_deleted_handlers(ctx);

    qemu_lockcnt_dec(&ctx->list_lock);

    progress |= timerlistgroup_run_timers(&ctx->tlg);

    return progress;
}

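/*
 * ctx->fdmon_ops starts as the portable ppoll()-based implementation.
 * fdmon_io_uring_setup() replaces it when io_uring is available; otherwise
 * fdmon_epoll_setup() prepares an epoll instance so that the context can
 * later upgrade to epoll once enough fds are monitored to make it pay off.
 */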
void aio_context_setup(AioContext *ctx)
{
    ctx->fdmon_ops = &fdmon_poll_ops;
    ctx->epollfd = -1;

    /* Use the fastest fd monitoring implementation if available */
    if (fdmon_io_uring_setup(ctx)) {
        return;
    }

    fdmon_epoll_setup(ctx);
}

void aio_context_destroy(AioContext *ctx)
{
    fdmon_io_uring_destroy(ctx);
    fdmon_epoll_disable(ctx);
    aio_free_deleted_handlers(ctx);
}

void aio_context_use_g_source(AioContext *ctx)
{
    /*
     * Disable io_uring when the glib main loop is used because it doesn't
     * support mixed glib/aio_poll() usage. It relies on aio_poll() being
     * called regularly so that changes to the monitored file descriptors are
     * submitted, otherwise a list of pending fd handlers builds up.
     */
    fdmon_io_uring_destroy(ctx);
    aio_free_deleted_handlers(ctx);
}

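/*
 * Configure adaptive busy-polling.  poll_ns is reset to 0 and then self-tunes
 * in aio_poll(): on activity it starts at 4000 ns and is multiplied by @grow
 * (default 2) up to @max_ns; when an iteration blocks longer than @max_ns it
 * is divided by @shrink, or dropped straight back to 0 if @shrink is 0.  For
 * example, with max_ns=32768 and grow/shrink left at 0, poll_ns steps through
 * 4000, 8000, 16000, 32000, 32768 (capped) and returns to 0 on the first
 * overshoot.
 */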
void aio_context_set_poll_params(AioContext *ctx, int64_t max_ns,
                                 int64_t grow, int64_t shrink, Error **errp)
{
    /* No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->poll_max_ns = max_ns;
    ctx->poll_ns = 0;
    ctx->poll_grow = grow;
    ctx->poll_shrink = shrink;

    aio_notify(ctx);
}

void aio_context_set_aio_params(AioContext *ctx, int64_t max_batch,
                                Error **errp)
{
    /*
     * No thread synchronization here, it doesn't matter if an incorrect value
     * is used once.
     */
    ctx->aio_max_batch = max_batch;

    aio_notify(ctx);
}