qemu/util/rcu.c
/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu-common.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)
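
/* To illustrate the scheme (the values follow directly from the two defines
 * above): rcu_gp_ctr is always odd, starting at 1 (RCU_GP_LOCKED).  On hosts
 * with 64-bit longs it advances 1 -> 3 -> 5 -> ... in synchronize_rcu; on
 * hosts with 32-bit longs only bit 1 toggles, 1 <-> 3, using the two-subphase
 * scheme below so that wraparound can never be confused with a genuinely old
 * reader snapshot.
 */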

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;

QemuEvent rcu_gp_event;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the beginning of
 * wait_for_readers() and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = atomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}
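
/* For reference, a simplified sketch of the reader side; the real inline
 * functions live in include/qemu/rcu.h and additionally track a nesting
 * depth, which is omitted here:
 *
 *     rcu_read_lock:
 *         atomic_set(&rcu_reader.ctr, atomic_read(&rcu_gp_ctr));
 *         smp_mb();    // pairs with the smp_mb() in wait_for_readers()
 *
 *     rcu_read_unlock:
 *         atomic_mb_set(&rcu_reader.ctr, 0);
 *         if (atomic_read(&rcu_reader.waiting)) {
 *             atomic_set(&rcu_reader.waiting, false);
 *             qemu_event_set(&rcu_gp_event);
 *         }
 *
 * Hence ctr == 0 means "outside any critical section" and ctr == rcu_gp_ctr
 * means "entered after the current grace period began"; rcu_gp_ongoing()
 * returns false in both cases, so the writer need not wait for that reader.
 */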

/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock.  */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers.  */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using atomic_mb_set for index->waiting, and
         * atomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            atomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the
         * loads of index->ctr.
         */
        smp_mb();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here; at worst we get some extra
                 * futex wakeups.
                 */
                atomic_set(&index->waiting, false);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }

        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so that rcu_(un)register_thread() does
         * not have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* put back the reader list in the registry */
    QLIST_SWAP(&registry, &qsreaders, node);
}

void synchronize_rcu(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);

    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the atomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, a two-subphase algorithm
             * ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period.  */
            atomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
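
/* A worked example of the two-subphase path above (the values follow from
 * RCU_GP_LOCKED and RCU_GP_CTR).  Suppose rcu_gp_ctr == 1 and a reader
 * entered its critical section with ctr == 1:
 *
 *     flip #1:  rcu_gp_ctr becomes 3
 *     wait:     the reader's ctr is 1, nonzero and != 3, so
 *               rcu_gp_ongoing() stays true until it unlocks (ctr = 0)
 *     flip #2:  rcu_gp_ctr returns to 1
 *     wait:     catches readers that meanwhile snapshotted 3
 *
 * Two flips are used because the parity bit has only two values: a reader
 * that fetched rcu_gp_ctr just before flip #1 but published its ctr only
 * later must not be mistaken for a reader of a later grace period that
 * reuses the same parity.  On 64-bit hosts a plain increment suffices,
 * since the counter cannot realistically wrap around.
 */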


#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = atomic_xchg(&tail, &node->next);
    atomic_mb_set(old_tail, node);
}
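
/* An example of the window try_dequeue() must tolerate (producers A and B
 * are hypothetical): if A is preempted between the atomic_xchg and the
 * atomic_mb_set, B can still complete a full enqueue behind A's node,
 * because tail already points at A's node->next.  Until A resumes, a walk
 * from head stops at A's predecessor, whose next pointer is still NULL;
 * the consumer sees a transiently "short" list and must wait rather than
 * assume the queue is corrupt.
 */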

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer, head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.  The
     * tail is consistent because updating it is the first step of an
     * enqueue.  It is only the next pointers that might be inconsistent.
     */
    if (head == &dummy && atomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = atomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry.  */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}
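
/* A worked example (illustrative): starting from dummy -> A -> B with
 * head == &dummy, the first pass pops the dummy, re-enqueues it to give
 * A -> B -> dummy, and retries; the retry returns A and leaves head == &B.
 * The dummy node thus guarantees the queue is never empty, so the consumer
 * never has to touch tail, which only producers update.
 */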

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = atomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we only need to process elements that
         * were added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = atomic_read(&rcu_call_count);
                if (n == 0) {
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = atomic_read(&rcu_call_count);
        }
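
        /* At this point either n >= RCU_CALL_MIN_SIZE, or roughly 50 ms of
         * polling (five 10 ms naps) have elapsed since the first callback
         * arrived: the heuristic trades a little reclamation latency for
         * fewer, larger grace periods.
         */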

        atomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    atomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
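
/* Typical usage of call_rcu1(), shown as a sketch (struct Foo and foo_free
 * are illustrative; callers usually go through the type-safe call_rcu()
 * wrapper macro declared in include/qemu/rcu.h):
 *
 *     struct Foo {
 *         struct rcu_head rcu;
 *         int datum;
 *     };
 *
 *     static void foo_free(struct rcu_head *head)
 *     {
 *         struct Foo *foo = container_of(head, struct Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     ...
 *     call_rcu1(&foo->rcu, foo_free);   // foo freed after a grace period
 */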

void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
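
/* Every thread that takes rcu_read_lock() must be registered; a sketch of
 * a typical worker (the function name is illustrative):
 *
 *     static void *worker(void *opaque)
 *     {
 *         rcu_register_thread();
 *         ...
 *         rcu_read_lock();
 *         ... dereference RCU-protected data ...
 *         rcu_read_unlock();
 *         ...
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 */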

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to hold the iothread lock, so the call_rcu
     * thread must have been quiescent even after forking; just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}
#endif

void rcu_after_fork(void)
{
    /* The child is single-threaded, so readers registered by the parent's
     * other threads no longer exist; reset the registry before
     * re-initializing.
     */
    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}

static void __attribute__((__constructor__)) rcu_init(void)
{
#ifdef CONFIG_POSIX
    /* Take both locks around fork() so that no other thread holds them in
     * the child, then release them on both sides of the fork.
     */
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_unlock);
#endif
    rcu_init_complete();
}