qemu/util/qemu-coroutine-lock.c
/*
 * coroutine queues and locks
 *
 * Copyright (c) 2011 Kevin Wolf <kwolf@redhat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to deal
 * in the Software without restriction, including without limitation the rights
 * to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
 * copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
 * OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
 * THE SOFTWARE.
 *
 * The lock-free mutex implementation is based on OSv
 * (core/lfmutex.cc, include/lockfree/mutex.hh).
 * Copyright (C) 2013 Cloudius Systems, Ltd.
 */
  28
  29#include "qemu/osdep.h"
  30#include "qemu-common.h"
  31#include "qemu/coroutine.h"
  32#include "qemu/coroutine_int.h"
  33#include "qemu/processor.h"
  34#include "qemu/queue.h"
  35#include "block/aio.h"
  36#include "trace.h"
  37
void qemu_co_queue_init(CoQueue *queue)
{
    QSIMPLEQ_INIT(&queue->entries);
}

void coroutine_fn qemu_co_queue_wait(CoQueue *queue, CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    QSIMPLEQ_INSERT_TAIL(&queue->entries, self, co_queue_next);

    if (mutex) {
        qemu_co_mutex_unlock(mutex);
    }

    /* There is no race condition here.  Other threads will call
     * aio_co_schedule on our AioContext, which can reenter this
     * coroutine but only after this yield and after the main loop
     * has gone through the next iteration.
     */
    qemu_coroutine_yield();
    assert(qemu_in_coroutine());

    /* TODO: OSv implements wait morphing here, where the wakeup
     * primitive automatically places the woken coroutine on the
     * mutex's queue.  This avoids the thundering herd effect.
     */
    if (mutex) {
        qemu_co_mutex_lock(mutex);
    }
}

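/*
 * Illustrative usage sketch (not part of the original file): together with
 * a CoMutex, a CoQueue follows the familiar condition-variable pattern.
 * The names "lock", "items" and "queue" below are hypothetical.
 *
 *     void coroutine_fn consume_one(void)
 *     {
 *         qemu_co_mutex_lock(&lock);
 *         while (items == 0) {
 *             // Atomically drops "lock" and reacquires it after wakeup.
 *             qemu_co_queue_wait(&queue, &lock);
 *         }
 *         items--;
 *         qemu_co_mutex_unlock(&lock);
 *     }
 *
 * A producer would increment "items" and call qemu_co_queue_next(&queue)
 * while holding "lock".
 */
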
/**
 * qemu_co_queue_run_restart:
 *
 * Enter each coroutine that was previously marked for restart by
 * qemu_co_queue_next() or qemu_co_queue_restart_all().  This function is
 * invoked by the core coroutine code when the current coroutine yields or
 * terminates.
 */
void qemu_co_queue_run_restart(Coroutine *co)
{
    Coroutine *next;
    QSIMPLEQ_HEAD(, Coroutine) tmp_queue_wakeup =
        QSIMPLEQ_HEAD_INITIALIZER(tmp_queue_wakeup);

    trace_qemu_co_queue_run_restart(co);

    /* Because "co" has yielded, any coroutine that we wake up can resume it.
     * If this happens and "co" terminates, co->co_queue_wakeup becomes
     * invalid memory.  Therefore, use a temporary queue and do not touch
     * the "co" coroutine as soon as you enter another one.
     *
     * In turn, the resumed "co" can populate the "co_queue_wakeup" queue
     * with new coroutines to be woken up.  The caller, who has resumed "co",
     * will be responsible for traversing the same queue, which may cause
     * a different wakeup order but not any missing wakeups.
     */
    QSIMPLEQ_CONCAT(&tmp_queue_wakeup, &co->co_queue_wakeup);

    while ((next = QSIMPLEQ_FIRST(&tmp_queue_wakeup))) {
        QSIMPLEQ_REMOVE_HEAD(&tmp_queue_wakeup, co_queue_next);
        qemu_coroutine_enter(next);
    }
}

static bool qemu_co_queue_do_restart(CoQueue *queue, bool single)
{
    Coroutine *next;

    if (QSIMPLEQ_EMPTY(&queue->entries)) {
        return false;
    }

    while ((next = QSIMPLEQ_FIRST(&queue->entries)) != NULL) {
        QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
        aio_co_wake(next);
        if (single) {
            break;
        }
    }
    return true;
}

bool coroutine_fn qemu_co_queue_next(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    return qemu_co_queue_do_restart(queue, true);
}

void coroutine_fn qemu_co_queue_restart_all(CoQueue *queue)
{
    assert(qemu_in_coroutine());
    qemu_co_queue_do_restart(queue, false);
}

bool qemu_co_enter_next(CoQueue *queue)
{
    Coroutine *next;

    next = QSIMPLEQ_FIRST(&queue->entries);
    if (!next) {
        return false;
    }

    QSIMPLEQ_REMOVE_HEAD(&queue->entries, co_queue_next);
    qemu_coroutine_enter(next);
    return true;
}

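/*
 * Illustrative sketch (not part of the original file): unlike the
 * coroutine_fn variants above, qemu_co_enter_next() can be called from
 * outside coroutine context, e.g. from a bottom half or fd handler.  The
 * names "drain_bh" and the opaque wiring are hypothetical.
 *
 *     static void drain_bh(void *opaque)
 *     {
 *         CoQueue *wait_queue = opaque;
 *
 *         // Resume waiters one at a time until the queue is empty.
 *         while (qemu_co_enter_next(wait_queue)) {
 *             // each iteration enters one waiting coroutine
 *         }
 *     }
 */
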
bool qemu_co_queue_empty(CoQueue *queue)
{
    return QSIMPLEQ_FIRST(&queue->entries) == NULL;
}

/* The wait records are handled with a multiple-producer, single-consumer
 * lock-free queue.  There cannot be two concurrent pop_waiter() calls
 * because pop_waiter() can only be called while mutex->handoff is zero.
 * This can happen in three cases:
 * (1) in qemu_co_mutex_unlock, before the hand-off protocol has started.
 *     In this case, qemu_co_mutex_lock will see mutex->handoff == 0 and
 *     not take part in the handoff.
 * (2) in qemu_co_mutex_lock, if it steals the hand-off responsibility from
 *     qemu_co_mutex_unlock.  In this case, qemu_co_mutex_unlock will fail
 *     the cmpxchg (it will see either 0 or the next sequence value) and
 *     exit.  The next hand-off cannot begin until qemu_co_mutex_lock has
 *     woken up someone.
 * (3) in qemu_co_mutex_unlock, if it takes the hand-off token itself.
 *     In this case another iteration starts with mutex->handoff == 0;
 *     a concurrent qemu_co_mutex_lock will fail the cmpxchg, and
 *     qemu_co_mutex_unlock will go back to case (1).
 *
 * The following functions manage this queue.
 */
typedef struct CoWaitRecord {
    Coroutine *co;
    QSLIST_ENTRY(CoWaitRecord) next;
} CoWaitRecord;

static void push_waiter(CoMutex *mutex, CoWaitRecord *w)
{
    w->co = qemu_coroutine_self();
    QSLIST_INSERT_HEAD_ATOMIC(&mutex->from_push, w, next);
}

static void move_waiters(CoMutex *mutex)
{
    QSLIST_HEAD(, CoWaitRecord) reversed;

    /* Move all waiters from the lock-free from_push list to the
     * consumer-side to_pop list.  from_push is in LIFO push order, so
     * prepending each element to to_pop restores FIFO arrival order.
     */
    QSLIST_MOVE_ATOMIC(&reversed, &mutex->from_push);
    while (!QSLIST_EMPTY(&reversed)) {
        CoWaitRecord *w = QSLIST_FIRST(&reversed);
        QSLIST_REMOVE_HEAD(&reversed, next);
        QSLIST_INSERT_HEAD(&mutex->to_pop, w, next);
    }
}

static CoWaitRecord *pop_waiter(CoMutex *mutex)
{
    CoWaitRecord *w;

    if (QSLIST_EMPTY(&mutex->to_pop)) {
        move_waiters(mutex);
        if (QSLIST_EMPTY(&mutex->to_pop)) {
            return NULL;
        }
    }
    w = QSLIST_FIRST(&mutex->to_pop);
    QSLIST_REMOVE_HEAD(&mutex->to_pop, next);
    return w;
}

static bool has_waiters(CoMutex *mutex)
{
    /* A waiter exists if either list is non-empty.  */
    return !QSLIST_EMPTY(&mutex->to_pop) || !QSLIST_EMPTY(&mutex->from_push);
}

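/*
 * Worked example (not part of the original file): suppose coroutines A, B
 * and C block on the mutex in that order.  Each push_waiter() prepends to
 * from_push, so the lock-free list ends up in LIFO order:
 *
 *     from_push: C -> B -> A        to_pop: (empty)
 *
 * When the single consumer calls pop_waiter(), move_waiters() prepends
 * each element of from_push to to_pop, reversing it back to FIFO order:
 *
 *     from_push: (empty)            to_pop: A -> B -> C
 *
 * so waiters are woken in the order they arrived.
 */
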
void qemu_co_mutex_init(CoMutex *mutex)
{
    memset(mutex, 0, sizeof(*mutex));
}

static void coroutine_fn qemu_co_mutex_wake(CoMutex *mutex, Coroutine *co)
{
    /* Read co before co->ctx; pairs with smp_wmb() in
     * qemu_coroutine_enter().
     */
    smp_read_barrier_depends();
    mutex->ctx = co->ctx;
    aio_co_wake(co);
}

static void coroutine_fn qemu_co_mutex_lock_slowpath(AioContext *ctx,
                                                     CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();
    CoWaitRecord w;
    unsigned old_handoff;

    trace_qemu_co_mutex_lock_entry(mutex, self);
    w.co = self;
    push_waiter(mutex, &w);

    /* This is the "Responsibility Hand-Off" protocol; a lock() picks from
     * a concurrent unlock() the responsibility of waking somebody up.
     */
    old_handoff = atomic_mb_read(&mutex->handoff);
    if (old_handoff &&
        has_waiters(mutex) &&
        atomic_cmpxchg(&mutex->handoff, old_handoff, 0) == old_handoff) {
        /* There can be no concurrent pops, because there can be only
         * one active handoff at a time.
         */
        CoWaitRecord *to_wake = pop_waiter(mutex);
        Coroutine *co = to_wake->co;
        if (co == self) {
            /* We got the lock ourselves!  */
            assert(to_wake == &w);
            mutex->ctx = ctx;
            return;
        }

        qemu_co_mutex_wake(mutex, co);
    }

    qemu_coroutine_yield();
    trace_qemu_co_mutex_lock_return(mutex, self);
}

void coroutine_fn qemu_co_mutex_lock(CoMutex *mutex)
{
    AioContext *ctx = qemu_get_current_aio_context();
    Coroutine *self = qemu_coroutine_self();
    int waiters, i;

    /* Running a very small critical section on pthread_mutex_t and CoMutex
     * shows that pthread_mutex_t is much faster because it doesn't actually
     * go to sleep.  What happens is that the critical section is shorter
     * than the latency of entering the kernel and thus FUTEX_WAIT always
     * fails.  With CoMutex there is no such latency but you still want to
     * avoid wait and wakeup.  So introduce it artificially.
     */
    i = 0;
retry_fast_path:
    waiters = atomic_cmpxchg(&mutex->locked, 0, 1);
    if (waiters != 0) {
        while (waiters == 1 && ++i < 1000) {
            if (atomic_read(&mutex->ctx) == ctx) {
                break;
            }
            if (atomic_read(&mutex->locked) == 0) {
                goto retry_fast_path;
            }
            cpu_relax();
        }
        waiters = atomic_fetch_inc(&mutex->locked);
    }

    if (waiters == 0) {
        /* Uncontended.  */
        trace_qemu_co_mutex_lock_uncontended(mutex, self);
        mutex->ctx = ctx;
    } else {
        qemu_co_mutex_lock_slowpath(ctx, mutex);
    }
    mutex->holder = self;
    self->locks_held++;
}

void coroutine_fn qemu_co_mutex_unlock(CoMutex *mutex)
{
    Coroutine *self = qemu_coroutine_self();

    trace_qemu_co_mutex_unlock_entry(mutex, self);

    assert(mutex->locked);
    assert(mutex->holder == self);
    assert(qemu_in_coroutine());

    mutex->ctx = NULL;
    mutex->holder = NULL;
    self->locks_held--;
    if (atomic_fetch_dec(&mutex->locked) == 1) {
        /* No waiting qemu_co_mutex_lock().  Phew, that was easy!  */
        return;
    }

    for (;;) {
        CoWaitRecord *to_wake = pop_waiter(mutex);
        unsigned our_handoff;

        if (to_wake) {
            qemu_co_mutex_wake(mutex, to_wake->co);
            break;
        }

        /* Some concurrent lock() is in progress (we know this because
         * mutex->locked was >1) but it hasn't yet put itself on the wait
         * queue.  Pick a sequence number for the handoff protocol (not 0).
         */
        if (++mutex->sequence == 0) {
            mutex->sequence = 1;
        }

        our_handoff = mutex->sequence;
        atomic_mb_set(&mutex->handoff, our_handoff);
        if (!has_waiters(mutex)) {
            /* The concurrent lock has not added itself yet, so it
             * will be able to pick our handoff.
             */
            break;
        }

        /* Try to do the handoff protocol ourselves; if somebody else has
         * already taken it, however, we're done and they're responsible.
         */
        if (atomic_cmpxchg(&mutex->handoff, our_handoff, 0) != our_handoff) {
            break;
        }
    }

    trace_qemu_co_mutex_unlock_return(mutex, self);
}

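/*
 * Illustrative usage sketch (not part of the original file): CoMutex is
 * locked and unlocked only from coroutine context.  The names "mutex" and
 * "shared_counter" below are hypothetical.
 *
 *     static CoMutex mutex;
 *     static int shared_counter;
 *
 *     void coroutine_fn increment(void)
 *     {
 *         qemu_co_mutex_lock(&mutex);
 *         shared_counter++;    // protected by mutex
 *         qemu_co_mutex_unlock(&mutex);
 *     }
 *
 * qemu_co_mutex_init(&mutex) must run before the first lock attempt.
 */
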
void qemu_co_rwlock_init(CoRwlock *lock)
{
    memset(lock, 0, sizeof(*lock));
    qemu_co_queue_init(&lock->queue);
    qemu_co_mutex_init(&lock->mutex);
}

void qemu_co_rwlock_rdlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    /* For fairness, wait if a writer is in line.  */
    while (lock->pending_writer) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex.  */
    self->locks_held++;
}

void qemu_co_rwlock_unlock(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    assert(qemu_in_coroutine());
    if (!lock->reader) {
        /* The critical section started in qemu_co_rwlock_wrlock.  */
        qemu_co_queue_restart_all(&lock->queue);
    } else {
        self->locks_held--;

        qemu_co_mutex_lock(&lock->mutex);
        lock->reader--;
        assert(lock->reader >= 0);
        /* Wakeup only one waiting writer */
        if (!lock->reader) {
            qemu_co_queue_next(&lock->queue);
        }
    }
    qemu_co_mutex_unlock(&lock->mutex);
}

void qemu_co_rwlock_downgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    /* lock->mutex critical section started in qemu_co_rwlock_wrlock or
     * qemu_co_rwlock_upgrade.
     */
    assert(lock->reader == 0);
    lock->reader++;
    qemu_co_mutex_unlock(&lock->mutex);

    /* The rest of the read-side critical section is run without the mutex.  */
    self->locks_held++;
}

void qemu_co_rwlock_wrlock(CoRwlock *lock)
{
    qemu_co_mutex_lock(&lock->mutex);
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, so that lock->reader remains zero.
     * There is no need to update self->locks_held.
     */
}

void qemu_co_rwlock_upgrade(CoRwlock *lock)
{
    Coroutine *self = qemu_coroutine_self();

    qemu_co_mutex_lock(&lock->mutex);
    assert(lock->reader > 0);
    lock->reader--;
    lock->pending_writer++;
    while (lock->reader) {
        qemu_co_queue_wait(&lock->queue, &lock->mutex);
    }
    lock->pending_writer--;

    /* The rest of the write-side critical section is run with
     * the mutex taken, similar to qemu_co_rwlock_wrlock.  Do
     * not account for the lock twice in self->locks_held.
     */
    self->locks_held--;
}

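/*
 * Illustrative usage sketch (not part of the original file): a reader that
 * upgrades to a writer when it decides to modify shared state.  The names
 * "rwlock", "table", "table_is_stale" and "refresh" are hypothetical.
 *
 *     void coroutine_fn update_if_stale(void)
 *     {
 *         qemu_co_rwlock_rdlock(&rwlock);
 *         if (table_is_stale(table)) {
 *             // Upgrade drops the read lock and waits for the other
 *             // readers to finish before granting write access.
 *             qemu_co_rwlock_upgrade(&rwlock);
 *             refresh(table);
 *         }
 *         // Works for both paths: with readers, the read-side branch of
 *         // qemu_co_rwlock_unlock runs; after an upgrade, the write-side
 *         // branch runs because lock->reader is zero.
 *         qemu_co_rwlock_unlock(&rwlock);
 *     }
 */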