qemu/async.c
/*
 * QEMU System Emulator
 *
 * Copyright (c) 2003-2008 Fabrice Bellard
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 */

#include "qemu-common.h"
#include "block/aio.h"
#include "block/thread-pool.h"
#include "qemu/main-loop.h"

/***********************************************************/
/* bottom halves (can be seen as timers which expire ASAP) */

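/*
 * Concurrency notes (added commentary, summarizing the code below):
 * aio_bh_new() and the deferred-deletion path in aio_bh_poll() modify the
 * singly linked list under ctx->bh_lock, while readers (aio_bh_poll,
 * aio_ctx_prepare, aio_ctx_check) walk it without taking the lock.
 * Publication is made safe by the smp_wmb()/smp_read_barrier_depends()
 * pairs, and nodes are only unlinked and freed once ctx->walking_bh has
 * dropped back to zero.
 */
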
struct QEMUBH {
    AioContext *ctx;
    QEMUBHFunc *cb;
    void *opaque;
    QEMUBH *next;
    bool scheduled;
    bool idle;
    bool deleted;
};

QEMUBH *aio_bh_new(AioContext *ctx, QEMUBHFunc *cb, void *opaque)
{
    QEMUBH *bh;
    bh = g_malloc0(sizeof(QEMUBH));
    bh->ctx = ctx;
    bh->cb = cb;
    bh->opaque = opaque;
    qemu_mutex_lock(&ctx->bh_lock);
    bh->next = ctx->first_bh;
    /* Make sure that the members are ready before putting bh into list */
    smp_wmb();
    ctx->first_bh = bh;
    qemu_mutex_unlock(&ctx->bh_lock);
    return bh;
}

/* Multiple invocations of aio_bh_poll() must not run concurrently. */
int aio_bh_poll(AioContext *ctx)
{
    QEMUBH *bh, **bhp, *next;
    int ret;

    ctx->walking_bh++;

    ret = 0;
    for (bh = ctx->first_bh; bh; bh = next) {
        /* Make sure that fetching bh happens before accessing its members */
        smp_read_barrier_depends();
        next = bh->next;
        if (!bh->deleted && bh->scheduled) {
            bh->scheduled = 0;
            /* Paired with the write barrier in qemu_bh_schedule() so that
             * bh->idle and any data the callback needs are read only after
             * bh->scheduled has been observed.
             */
            smp_rmb();
            if (!bh->idle) {
                ret = 1;
            }
            bh->idle = 0;
            bh->cb(bh->opaque);
        }
    }

    ctx->walking_bh--;

    /* remove deleted bhs */
    if (!ctx->walking_bh) {
        qemu_mutex_lock(&ctx->bh_lock);
        bhp = &ctx->first_bh;
        while (*bhp) {
            bh = *bhp;
            if (bh->deleted) {
                *bhp = bh->next;
                g_free(bh);
            } else {
                bhp = &bh->next;
            }
        }
        qemu_mutex_unlock(&ctx->bh_lock);
    }

    return ret;
}

void qemu_bh_schedule_idle(QEMUBH *bh)
{
    if (bh->scheduled) {
        return;
    }
    bh->idle = 1;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in aio_bh_poll().
     */
    smp_wmb();
    bh->scheduled = 1;
}

void qemu_bh_schedule(QEMUBH *bh)
{
    if (bh->scheduled) {
        return;
    }
    bh->idle = 0;
    /* Make sure that idle & any writes needed by the callback are done
     * before the locations are read in aio_bh_poll().
     */
    smp_wmb();
    bh->scheduled = 1;
    aio_notify(bh->ctx);
}


/* This function is async: it only clears the scheduled flag and does not
 * wait for a callback that is already running.
 */
void qemu_bh_cancel(QEMUBH *bh)
{
    bh->scheduled = 0;
}

/* This function is async: the bottom half is only marked as deleted here;
 * it is actually unlinked and freed later, by aio_bh_poll(), once nobody is
 * walking the list.
 */
void qemu_bh_delete(QEMUBH *bh)
{
    bh->scheduled = 0;
    bh->deleted = 1;
}
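
/*
 * Illustrative sketch (not part of the original file): a typical user of the
 * bottom-half API.  The names ExampleState, example_cb and example_init are
 * hypothetical; only aio_bh_new(), qemu_bh_schedule() and qemu_bh_delete()
 * come from this file.  Kept under "#if 0" so it is never compiled.
 */
#if 0
typedef struct ExampleState {
    QEMUBH *bh;
    bool done;
} ExampleState;

static void example_cb(void *opaque)
{
    ExampleState *s = opaque;

    /* Runs from aio_bh_poll() in the context's thread. */
    s->done = true;
    qemu_bh_delete(s->bh);      /* actually freed later by aio_bh_poll() */
}

static void example_init(AioContext *ctx, ExampleState *s)
{
    s->bh = aio_bh_new(ctx, example_cb, s);
    qemu_bh_schedule(s->bh);    /* callback will run on the next poll */
}
#endif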

static gboolean
aio_ctx_prepare(GSource *source, gint    *timeout)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;
    int deadline;

    /* We assume there is no timeout already supplied */
    *timeout = -1;
    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            if (bh->idle) {
                /* idle bottom halves will be polled at least
                 * every 10ms */
                *timeout = 10;
            } else {
                /* non-idle bottom halves will be executed
                 * immediately */
                *timeout = 0;
                return true;
            }
        }
    }

    deadline = qemu_timeout_ns_to_ms(timerlistgroup_deadline_ns(&ctx->tlg));
    if (deadline == 0) {
        *timeout = 0;
        return true;
    } else {
        *timeout = qemu_soonest_timeout(*timeout, deadline);
    }

    return false;
}

static gboolean
aio_ctx_check(GSource *source)
{
    AioContext *ctx = (AioContext *) source;
    QEMUBH *bh;

    for (bh = ctx->first_bh; bh; bh = bh->next) {
        if (!bh->deleted && bh->scheduled) {
            return true;
        }
    }
    return aio_pending(ctx) || (timerlistgroup_deadline_ns(&ctx->tlg) == 0);
}

static gboolean
aio_ctx_dispatch(GSource     *source,
                 GSourceFunc  callback,
                 gpointer     user_data)
{
    AioContext *ctx = (AioContext *) source;

    assert(callback == NULL);
    aio_poll(ctx, false);
    return true;
}

static void
aio_ctx_finalize(GSource     *source)
{
    AioContext *ctx = (AioContext *) source;

    thread_pool_free(ctx->thread_pool);
    aio_set_event_notifier(ctx, &ctx->notifier, NULL);
    event_notifier_cleanup(&ctx->notifier);
    qemu_mutex_destroy(&ctx->bh_lock);
    g_array_free(ctx->pollfds, TRUE);
    timerlistgroup_deinit(&ctx->tlg);
}

static GSourceFuncs aio_source_funcs = {
    aio_ctx_prepare,
    aio_ctx_check,
    aio_ctx_dispatch,
    aio_ctx_finalize
};
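
/*
 * Note (added commentary): the callbacks above implement glib's GSource
 * contract.  prepare() reports whether the source is already ready to
 * dispatch and how long the main loop may poll (0 when a non-idle bottom
 * half or an expired timer is pending, at most 10 ms while only idle bottom
 * halves are scheduled); check() re-tests readiness after the poll; and
 * dispatch() does the actual work by calling aio_poll(ctx, false).
 */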

GSource *aio_get_g_source(AioContext *ctx)
{
    g_source_ref(&ctx->source);
    return &ctx->source;
}

ThreadPool *aio_get_thread_pool(AioContext *ctx)
{
    if (!ctx->thread_pool) {
        ctx->thread_pool = thread_pool_new(ctx);
    }
    return ctx->thread_pool;
}

void aio_notify(AioContext *ctx)
{
    event_notifier_set(&ctx->notifier);
}

static void aio_timerlist_notify(void *opaque)
{
    aio_notify(opaque);
}

AioContext *aio_context_new(void)
{
    AioContext *ctx;
    ctx = (AioContext *) g_source_new(&aio_source_funcs, sizeof(AioContext));
    ctx->pollfds = g_array_new(FALSE, FALSE, sizeof(GPollFD));
    ctx->thread_pool = NULL;
    qemu_mutex_init(&ctx->bh_lock);
    event_notifier_init(&ctx->notifier, false);
    aio_set_event_notifier(ctx, &ctx->notifier,
                           (EventNotifierHandler *)
                           event_notifier_test_and_clear);
    timerlistgroup_init(&ctx->tlg, aio_timerlist_notify, ctx);

    return ctx;
}
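
/*
 * Illustrative sketch (not part of the original file): how a caller might
 * create an AioContext and drive it from the default glib main loop.  The
 * function name example_attach_ctx is hypothetical; aio_context_new(),
 * aio_get_g_source() and aio_context_unref() come from this file, while
 * g_source_attach(), g_source_unref() and g_main_context_default() are
 * standard glib API.  Kept under "#if 0" so it is never compiled.
 */
#if 0
static AioContext *example_attach_ctx(void)
{
    AioContext *ctx = aio_context_new();
    GSource *source = aio_get_g_source(ctx);    /* takes a reference */

    g_source_attach(source, g_main_context_default());
    g_source_unref(source);     /* the main context now holds its own ref */
    return ctx;                 /* release with aio_context_unref() when done */
}
#endif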

void aio_context_ref(AioContext *ctx)
{
    g_source_ref(&ctx->source);
}

void aio_context_unref(AioContext *ctx)
{
    g_source_unref(&ctx->source);
}