/* qemu/util/qemu-coroutine.c */
/*
 * QEMU coroutines
 *
 * Copyright IBM, Corp. 2011
 *
 * Authors:
 *  Stefan Hajnoczi    <stefanha@linux.vnet.ibm.com>
 *  Kevin Wolf         <kwolf@redhat.com>
 *
 * This work is licensed under the terms of the GNU LGPL, version 2 or later.
 * See the COPYING.LIB file in the top-level directory.
 *
 */
  14
  15#include "qemu/osdep.h"
  16#include "trace.h"
  17#include "qemu-common.h"
  18#include "qemu/thread.h"
  19#include "qemu/atomic.h"
  20#include "qemu/coroutine.h"
  21#include "qemu/coroutine_int.h"
  22
  23enum {
  24    POOL_BATCH_SIZE = 64,
  25};
  26
  27/** Free list to speed up creation */
  28static QSLIST_HEAD(, Coroutine) release_pool = QSLIST_HEAD_INITIALIZER(pool);
  29static unsigned int release_pool_size;
  30static __thread QSLIST_HEAD(, Coroutine) alloc_pool = QSLIST_HEAD_INITIALIZER(pool);
  31static __thread unsigned int alloc_pool_size;
  32static __thread Notifier coroutine_pool_cleanup_notifier;
  33
  34static void coroutine_pool_cleanup(Notifier *n, void *value)
  35{
  36    Coroutine *co;
  37    Coroutine *tmp;
  38
  39    QSLIST_FOREACH_SAFE(co, &alloc_pool, pool_next, tmp) {
  40        QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
  41        qemu_coroutine_delete(co);
  42    }
  43}
  44
  45Coroutine *qemu_coroutine_create(CoroutineEntry *entry)
  46{
  47    Coroutine *co = NULL;
  48
  49    if (CONFIG_COROUTINE_POOL) {
  50        co = QSLIST_FIRST(&alloc_pool);
  51        if (!co) {
  52            if (release_pool_size > POOL_BATCH_SIZE) {
  53                /* Slow path; a good place to register the destructor, too.  */
  54                if (!coroutine_pool_cleanup_notifier.notify) {
  55                    coroutine_pool_cleanup_notifier.notify = coroutine_pool_cleanup;
  56                    qemu_thread_atexit_add(&coroutine_pool_cleanup_notifier);
  57                }
  58
  59                /* This is not exact; there could be a little skew between
  60                 * release_pool_size and the actual size of release_pool.  But
  61                 * it is just a heuristic, it does not need to be perfect.
  62                 */
  63                alloc_pool_size = atomic_xchg(&release_pool_size, 0);
  64                QSLIST_MOVE_ATOMIC(&alloc_pool, &release_pool);
  65                co = QSLIST_FIRST(&alloc_pool);
  66            }
  67        }
  68        if (co) {
  69            QSLIST_REMOVE_HEAD(&alloc_pool, pool_next);
  70            alloc_pool_size--;
  71        }
  72    }
  73
  74    if (!co) {
  75        co = qemu_coroutine_new();
  76    }
  77
  78    co->entry = entry;
  79    QTAILQ_INIT(&co->co_queue_wakeup);
  80    return co;
  81}
  82
  83static void coroutine_delete(Coroutine *co)
  84{
  85    co->caller = NULL;
  86
  87    if (CONFIG_COROUTINE_POOL) {
  88        if (release_pool_size < POOL_BATCH_SIZE * 2) {
  89            QSLIST_INSERT_HEAD_ATOMIC(&release_pool, co, pool_next);
  90            atomic_inc(&release_pool_size);
  91            return;
  92        }
  93        if (alloc_pool_size < POOL_BATCH_SIZE) {
  94            QSLIST_INSERT_HEAD(&alloc_pool, co, pool_next);
  95            alloc_pool_size++;
  96            return;
  97        }
  98    }
  99
 100    qemu_coroutine_delete(co);
 101}
 102
 103void qemu_coroutine_enter(Coroutine *co, void *opaque)
 104{
 105    Coroutine *self = qemu_coroutine_self();
 106    CoroutineAction ret;
 107
 108    trace_qemu_coroutine_enter(self, co, opaque);
 109
 110    if (co->caller) {
 111        fprintf(stderr, "Co-routine re-entered recursively\n");
 112        abort();
 113    }
 114
 115    co->caller = self;
 116    co->entry_arg = opaque;
 117    ret = qemu_coroutine_switch(self, co, COROUTINE_ENTER);
 118
 119    qemu_co_queue_run_restart(co);
 120
 121    switch (ret) {
 122    case COROUTINE_YIELD:
 123        return;
 124    case COROUTINE_TERMINATE:
 125        trace_qemu_coroutine_terminate(co);
 126        coroutine_delete(co);
 127        return;
 128    default:
 129        abort();
 130    }
 131}
 132
 133void coroutine_fn qemu_coroutine_yield(void)
 134{
 135    Coroutine *self = qemu_coroutine_self();
 136    Coroutine *to = self->caller;
 137
 138    trace_qemu_coroutine_yield(self, to);
 139
 140    if (!to) {
 141        fprintf(stderr, "Co-routine is yielding to no one\n");
 142        abort();
 143    }
 144
 145    self->caller = NULL;
 146    qemu_coroutine_switch(self, to, COROUTINE_YIELD);
 147}
 148