qemu/util/qemu-coroutine.c
/*
 * QEMU coroutines
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *  Kevin Wolf         <kwolf@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */

#include "qemu/osdep.h"
#include "trace.h"
#include "qemu-common.h"
#include "qemu/thread.h"
#include "qemu/atomic.h"
#include "qemu/coroutine.h"
#include "qemu/coroutine_int.h"
#include "block/aio.h"

enum {
    POOL_BATCH_SIZE = 64,
};

/** Free lists to speed up creation: terminated coroutines are recycled
 * through the global release_pool and handed out again from the
 * per-thread alloc_pool. */
static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool);
static unsigned int release_pool_size;
static __thread QSLIST_HEAD(, Coroutine) alloc_pool = QSLIST_HEAD_INITIALIZER(pool);
static __thread unsigned int alloc_pool_size;
static __thread Notifier coroutine_pool_cleanup_notifier;

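/* Thread-exit callback: drain this thread's alloc_pool and free the
 * coroutines in it.  Registered lazily in qemu_coroutine_create(). */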
static void coroutine_pool_cleanup(Notifier *n, void *value)
{
    Coroutine *co;
    Coroutine *tmp;

    QSLIST_FOREACH_SAFE(co, &alloc_pool, pool_next, tmp) {
        QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
        qemu_coroutine_delete(co);
    }
}

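/* Allocate and initialize a coroutine.  With pooling enabled, take from
 * the per-thread alloc_pool first, refilling it from the global
 * release_pool in batches; fall back to qemu_coroutine_new() only when
 * both pools are empty. */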
Coroutine *qemu_coroutine_create(CoroutineEntry *entry, void *opaque)
{
    Coroutine *co = NULL;

    if (CONFIG_COROUTINE_POOL) {
        co = QSLIST_FIRST(&alloc_pool);
        if (!co) {
            if (release_pool_size > POOL_BATCH_SIZE) {
                /* Slow path; a good place to register the destructor, too.  */
                if (!coroutine_pool_cleanup_notifier.notify) {
                    coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup;
                    qemu_thread_atexit_add(&coroutine_pool_cleanup_notifier);
                }

                /* This is not exact; there could be a little skew between
                 * release_pool_size and the actual size of release_pool.  But
                 * it is just a heuristic, it does not need to be perfect.
                 */
                alloc_pool_size = atomic_xchg(&release_pool_size, 0);
                QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool);
                co = QSLIST_FIRST(&alloc_pool);
            }
        }
        if (co) {
            QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
            alloc_pool_size--;
        }
    }

    if (!co) {
        co = qemu_coroutine_new();
    }

    co->entry = entry;
    co->entry_arg = opaque;
    QSIMPLEQ_INIT(&co->co_queue_wakeup);
    return co;
}

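/* Recycle a terminated coroutine: prefer the global release_pool (up to
 * twice POOL_BATCH_SIZE entries), then this thread's alloc_pool, and
 * only free the coroutine outright once both pools are full. */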
static void coroutine_delete(Coroutine *co)
{
    co->caller = NULL;

    if (CONFIG_COROUTINE_POOL) {
        if (release_pool_size < POOL_BATCH_SIZE * 2) {
            QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
            atomic_inc(&release_pool_size);
            return;
        }
        if (alloc_pool_size < POOL_BATCH_SIZE) {
            QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next);
            alloc_pool_size++;
            return;
        }
    }

    qemu_coroutine_delete(co);
}

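/* Transfer control into @co within @ctx.  Aborts if @co is already
 * scheduled elsewhere or re-entered recursively; when @co terminates,
 * it is recycled via coroutine_delete(). */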
void qemu_aio_coroutine_enter(AioContext *ctx, Coroutine *co)
{
    Coroutine *self = qemu_coroutine_self();
    CoroutineAction ret;

    /* Cannot rely on the read barrier for co in aio_co_wake(), as there are
     * callers outside of aio_co_wake() */
    const char *scheduled = atomic_mb_read(&co->scheduled);

    trace_qemu_aio_coroutine_enter(ctx, self, co, co->entry_arg);

    /* if the Coroutine has already been scheduled, entering it again will
     * cause us to enter it twice, potentially even after the coroutine has
     * been deleted */
    if (scheduled) {
        fprintf(stderr,
                "%s: Co-routine was already scheduled in '%s'\n",
                __func__, scheduled);
        abort();
    }

    if (co->caller) {
        fprintf(stderr, "Co-routine re-entered recursively\n");
        abort();
    }

    co->caller = self;
    co->ctx = ctx;

    /* Store co->ctx before anything that stores co.  Matches
     * barrier in aio_co_wake and qemu_co_mutex_wake.
     */
    smp_wmb();

    ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);

    qemu_co_queue_run_restart(co);

    /* Beware, if ret == COROUTINE_YIELD and qemu_co_queue_run_restart()
     * has started any other coroutine, "co" might have been reentered
     * and even freed by now!  So be careful and do not touch it.
     */

    switch (ret) {
    case COROUTINE_YIELD:
        return;
    case COROUTINE_TERMINATE:
        assert(!co->locks_held);
        trace_qemu_coroutine_terminate(co);
        coroutine_delete(co);
        return;
    default:
        abort();
    }
}

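/* Enter @co in the calling thread's current AioContext */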
void qemu_coroutine_enter(Coroutine *co)
{
    qemu_aio_coroutine_enter(qemu_get_current_aio_context(), co);
}

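/* Enter @co only if nobody has entered it already */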
void qemu_coroutine_enter_if_inactive(Coroutine *co)
{
    if (!qemu_coroutine_entered(co)) {
        qemu_coroutine_enter(co);
    }
}

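/* Yield control from the current coroutine back to its caller.  Aborts
 * if the coroutine has no caller to return to. */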
void coroutine_fn qemu_coroutine_yield(void)
{
    Coroutine *self = qemu_coroutine_self();
    Coroutine *to = self->caller;

    trace_qemu_coroutine_yield(self, to);

    if (!to) {
        fprintf(stderr, "Co-routine is yielding to no one\n");
        abort();
    }

    self->caller = NULL;
    qemu_coroutine_switch(self, to, COROUTINE_YIELD);
}

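/* Return true if @co has been entered and has not yet yielded back to
 * or been deleted by its caller, i.e. its caller field is still set */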
bool qemu_coroutine_entered(Coroutine *co)
{
    return co->caller;
}
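
/*
 * Usage sketch (illustrative only; my_co_entry and my_state are
 * hypothetical names, not part of this file):
 *
 *   static void coroutine_fn my_co_entry(void *opaque)
 *   {
 *       ... do work, calling qemu_coroutine_yield() to suspend ...
 *   }
 *
 *   Coroutine *co = qemu_coroutine_create(my_co_entry, my_state);
 *   qemu_coroutine_enter(co);
 */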