qemu/util/rcu.c
/*
 * urcu-mb.c
 *
 * Userspace RCU library with explicit memory barriers
 *
 * Copyright (c) 2009 Mathieu Desnoyers <mathieu.desnoyers@efficios.com>
 * Copyright (c) 2009 Paul E. McKenney, IBM Corporation.
 * Copyright 2015 Red Hat, Inc.
 *
 * Ported to QEMU by Paolo Bonzini  <pbonzini@redhat.com>
 *
 * This library is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This library is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this library; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 *
 * IBM's contributions to this file may be relicensed under LGPLv2 or later.
 */

#include "qemu/osdep.h"
#include "qemu/rcu.h"
#include "qemu/atomic.h"
#include "qemu/thread.h"
#include "qemu/main-loop.h"
#include "qemu/lockable.h"
#if defined(CONFIG_MALLOC_TRIM)
#include <malloc.h>
#endif

/*
 * Global grace period counter.  Bit 0 is always one in rcu_gp_ctr.
 * Bits 1 and above are defined in synchronize_rcu.
 */
#define RCU_GP_LOCKED           (1UL << 0)
#define RCU_GP_CTR              (1UL << 1)

unsigned long rcu_gp_ctr = RCU_GP_LOCKED;
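
/* For orientation, the reader side lives in include/qemu/rcu.h; a
 * simplified sketch of the outermost rcu_read_lock() (paraphrased here,
 * not authoritative) is:
 *
 *     static inline void rcu_read_lock(void)
 *     {
 *         struct rcu_reader_data *p_rcu_reader = &rcu_reader;
 *         unsigned ctr;
 *
 *         if (p_rcu_reader->depth++ > 0) {
 *             return;
 *         }
 *
 *         // Snapshot the global counter; a non-zero ctr marks this
 *         // thread as being inside a read-side critical section.
 *         ctr = qatomic_read(&rcu_gp_ctr);
 *         qatomic_set(&p_rcu_reader->ctr, ctr);
 *
 *         // Write p_rcu_reader->ctr before reading RCU-protected
 *         // pointers; pairs with smp_mb_global() in synchronize_rcu().
 *         smp_mb_placeholder();
 *     }
 *
 * rcu_read_unlock() zeroes p_rcu_reader->ctr again and wakes up a
 * waiting writer if that writer set p_rcu_reader->waiting.
 */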

QemuEvent rcu_gp_event;
static int in_drain_call_rcu;
static QemuMutex rcu_registry_lock;
static QemuMutex rcu_sync_lock;

/*
 * Check whether a quiescent state was crossed between the beginning of
 * wait_for_readers() and now.
 */
static inline int rcu_gp_ongoing(unsigned long *ctr)
{
    unsigned long v;

    v = qatomic_read(ctr);
    return v && (v != rcu_gp_ctr);
}
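
/* A concrete trace of the check above, as an illustrative example:
 * a reader enters its critical section while rcu_gp_ctr == 1
 * (RCU_GP_LOCKED), so its rcu_reader.ctr snapshot is 1.  A writer then
 * advances the grace period to 3 (RCU_GP_LOCKED | RCU_GP_CTR).  Now
 * rcu_gp_ongoing() sees v == 1: non-zero and different from rcu_gp_ctr,
 * so the reader still holds up the grace period.  Once the reader
 * leaves (v == 0) or re-enters after the update (v == 3), the grace
 * period can complete with respect to that thread.
 */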

/* Written to only by each individual reader. Read by both the reader and the
 * writers.
 */
__thread struct rcu_reader_data rcu_reader;

/* Protected by rcu_registry_lock.  */
typedef QLIST_HEAD(, rcu_reader_data) ThreadList;
static ThreadList registry = QLIST_HEAD_INITIALIZER(registry);

/* Wait for previous parity/grace period to be empty of readers.  */
static void wait_for_readers(void)
{
    ThreadList qsreaders = QLIST_HEAD_INITIALIZER(qsreaders);
    struct rcu_reader_data *index, *tmp;

    for (;;) {
        /* We want to be notified of changes made to rcu_gp_ongoing
         * while we walk the list.
         */
        qemu_event_reset(&rcu_gp_event);

        /* Instead of using qatomic_mb_set for index->waiting, and
         * qatomic_mb_read for index->ctr, memory barriers are placed
         * manually since writes to different threads are independent.
         * qemu_event_reset has acquire semantics, so no memory barrier
         * is needed here.
         */
        QLIST_FOREACH(index, &registry, node) {
            qatomic_set(&index->waiting, true);
        }

        /* Here, order the stores to index->waiting before the loads of
         * index->ctr.  Pairs with smp_mb_placeholder() in rcu_read_unlock(),
         * ensuring that the loads of index->ctr are sequentially consistent.
         */
        smp_mb_global();

        QLIST_FOREACH_SAFE(index, &registry, node, tmp) {
            if (!rcu_gp_ongoing(&index->ctr)) {
                QLIST_REMOVE(index, node);
                QLIST_INSERT_HEAD(&qsreaders, index, node);

                /* No need for mb_set here; at worst we get some
                 * extra futex wakeups.
                 */
                qatomic_set(&index->waiting, false);
            } else if (qatomic_read(&in_drain_call_rcu)) {
                notifier_list_notify(&index->force_rcu, NULL);
            }
        }

        if (QLIST_EMPTY(&registry)) {
            break;
        }
        /* Wait for one thread to report a quiescent state and try again.
         * Release rcu_registry_lock, so that rcu_(un)register_thread() does
         * not have to wait too long.
         *
         * rcu_register_thread() may add nodes to &registry; it will not
         * wake up synchronize_rcu, but that is okay because at least another
         * thread must exit its RCU read-side critical section before
         * synchronize_rcu is done.  The next iteration of the loop will
         * move the new thread's rcu_reader from &registry to &qsreaders,
         * because rcu_gp_ongoing() will return false.
         *
         * rcu_unregister_thread() may remove nodes from &qsreaders instead
         * of &registry if it runs during qemu_event_wait.  That's okay;
         * the node then will not be added back to &registry by QLIST_SWAP
         * below.  The invariant is that the node is part of one list when
         * rcu_registry_lock is released.
         */
        qemu_mutex_unlock(&rcu_registry_lock);
        qemu_event_wait(&rcu_gp_event);
        qemu_mutex_lock(&rcu_registry_lock);
    }

    /* Put back the reader list in the registry.  */
    QLIST_SWAP(&registry, &qsreaders, node);
}
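
/* The reader side of this handshake lives in rcu_read_unlock() in
 * include/qemu/rcu.h; paraphrased (not authoritative), the outermost
 * unlock does roughly:
 *
 *     qatomic_store_release(&p_rcu_reader->ctr, 0);
 *     smp_mb_placeholder();   // pairs with smp_mb_global() above
 *     if (qatomic_read(&p_rcu_reader->waiting)) {
 *         qatomic_set(&p_rcu_reader->waiting, false);
 *         qemu_event_set(&rcu_gp_event);   // wakes wait_for_readers()
 *     }
 */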

void synchronize_rcu(void)
{
    QEMU_LOCK_GUARD(&rcu_sync_lock);

    /* Write RCU-protected pointers before reading p_rcu_reader->ctr.
     * Pairs with smp_mb_placeholder() in rcu_read_lock().
     */
    smp_mb_global();

    QEMU_LOCK_GUARD(&rcu_registry_lock);
    if (!QLIST_EMPTY(&registry)) {
        /* In either case, the qatomic_mb_set below blocks stores that free
         * old RCU-protected pointers.
         */
        if (sizeof(rcu_gp_ctr) < 8) {
            /* For architectures with 32-bit longs, an algorithm with two
             * subphases ensures we do not encounter overflow bugs.
             *
             * Switch parity: 0 -> 1, 1 -> 0.
             */
            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
            wait_for_readers();
            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr ^ RCU_GP_CTR);
        } else {
            /* Increment current grace period.  */
            qatomic_mb_set(&rcu_gp_ctr, rcu_gp_ctr + RCU_GP_CTR);
        }

        wait_for_readers();
    }
}
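
/* A minimal writer-side usage sketch (hypothetical type and variable
 * names; only qatomic_rcu_set() and synchronize_rcu() are real APIs):
 *
 *     static Foo *global_foo;    // readers use qatomic_rcu_read()
 *
 *     void update_foo(Foo *new_foo)
 *     {
 *         Foo *old = global_foo;
 *
 *         // Publish the new version; readers see either old or new.
 *         qatomic_rcu_set(&global_foo, new_foo);
 *
 *         // Wait until all pre-existing readers are done...
 *         synchronize_rcu();
 *
 *         // ...after which no reader can still reference the old one.
 *         g_free(old);
 *     }
 *
 * When blocking is undesirable, call_rcu1() below defers the free to
 * the call_rcu thread instead.
 */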

#define RCU_CALL_MIN_SIZE        30

/* Multi-producer, single-consumer queue based on urcu/static/wfqueue.h
 * from liburcu.  Note that head is only used by the consumer.
 */
static struct rcu_head dummy;
static struct rcu_head *head = &dummy, **tail = &dummy.next;
static int rcu_call_count;
static QemuEvent rcu_call_ready_event;

static void enqueue(struct rcu_head *node)
{
    struct rcu_head **old_tail;

    node->next = NULL;
    old_tail = qatomic_xchg(&tail, &node->next);
    qatomic_mb_set(old_tail, node);
}
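
/* To illustrate why the two-step enqueue is safe, consider a queue that
 * holds only the dummy node (head == &dummy, tail == &dummy.next) while
 * two producers A and B race to append nodes a and b:
 *
 *     A: qatomic_xchg(&tail, &a.next)    -> old_tail = &dummy.next
 *     B: qatomic_xchg(&tail, &b.next)    -> old_tail = &a.next
 *     B: qatomic_mb_set(&a.next, &b)     -> a -> b linked
 *     A: qatomic_mb_set(&dummy.next, &a) -> dummy -> a -> b linked
 *
 * Between the xchg and the store the chain is transiently broken (a
 * next pointer is still NULL); the consumer handles that window in
 * try_dequeue() by treating a NULL next pointer as "not ready yet".
 */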

static struct rcu_head *try_dequeue(void)
{
    struct rcu_head *node, *next;

retry:
    /* Test for an empty list, which we do not expect.  Note that for
     * the consumer head and tail are always consistent.  The head
     * is consistent because only the consumer reads/writes it.
     * The tail is consistent because updating it is the first step
     * of enqueuing.  It is only the next pointers that might be
     * inconsistent.
     */
    if (head == &dummy && qatomic_mb_read(&tail) == &dummy.next) {
        abort();
    }

    /* If the head node has NULL in its next pointer, the value is
     * wrong and we need to wait until its enqueuer finishes the update.
     */
    node = head;
    next = qatomic_mb_read(&head->next);
    if (!next) {
        return NULL;
    }

    /* Since we are the sole consumer, and we excluded the empty case
     * above, the queue will always have at least two nodes: the
     * dummy node, and the one being removed.  So we do not need to update
     * the tail pointer.
     */
    head = next;

    /* If we dequeued the dummy node, add it back at the end and retry.  */
    if (node == &dummy) {
        enqueue(node);
        goto retry;
    }

    return node;
}

static void *call_rcu_thread(void *opaque)
{
    struct rcu_head *node;

    rcu_register_thread();

    for (;;) {
        int tries = 0;
        int n = qatomic_read(&rcu_call_count);

        /* Heuristically wait for a decent number of callbacks to pile up.
         * Fetch rcu_call_count now; we must only process elements that were
         * added before synchronize_rcu() starts.
         */
        while (n == 0 || (n < RCU_CALL_MIN_SIZE && ++tries <= 5)) {
            g_usleep(10000);
            if (n == 0) {
                qemu_event_reset(&rcu_call_ready_event);
                n = qatomic_read(&rcu_call_count);
                if (n == 0) {
#if defined(CONFIG_MALLOC_TRIM)
                    malloc_trim(4 * 1024 * 1024);
#endif
                    qemu_event_wait(&rcu_call_ready_event);
                }
            }
            n = qatomic_read(&rcu_call_count);
        }

        qatomic_sub(&rcu_call_count, n);
        synchronize_rcu();
        qemu_mutex_lock_iothread();
        while (n > 0) {
            node = try_dequeue();
            while (!node) {
                qemu_mutex_unlock_iothread();
                qemu_event_reset(&rcu_call_ready_event);
                node = try_dequeue();
                if (!node) {
                    qemu_event_wait(&rcu_call_ready_event);
                    node = try_dequeue();
                }
                qemu_mutex_lock_iothread();
            }

            n--;
            node->func(node);
        }
        qemu_mutex_unlock_iothread();
    }
    abort();
}
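
/* With the constants above, the batching heuristic adds at most five
 * extra 10 ms naps (about 50 ms) before a non-empty batch smaller than
 * RCU_CALL_MIN_SIZE (30) callbacks is processed anyway; a batch that
 * reaches the threshold sooner is processed as soon as it does.
 */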

void call_rcu1(struct rcu_head *node, void (*func)(struct rcu_head *node))
{
    node->func = func;
    enqueue(node);
    qatomic_inc(&rcu_call_count);
    qemu_event_set(&rcu_call_ready_event);
}
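
/* A minimal usage sketch for call_rcu1(), with hypothetical type and
 * function names.  The struct embeds the rcu_head, and the callback
 * recovers the enclosing object with container_of():
 *
 *     typedef struct Foo {
 *         struct rcu_head rcu;
 *         int payload;
 *     } Foo;
 *
 *     static void foo_free(struct rcu_head *head)
 *     {
 *         Foo *foo = container_of(head, Foo, rcu);
 *         g_free(foo);
 *     }
 *
 *     // After unpublishing foo from all RCU-protected structures:
 *     call_rcu1(&foo->rcu, foo_free);
 *
 * include/qemu/rcu.h also provides the call_rcu() and g_free_rcu()
 * convenience macros that wrap this function.
 */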

struct rcu_drain {
    struct rcu_head rcu;
    QemuEvent drain_complete_event;
};

static void drain_rcu_callback(struct rcu_head *node)
{
    struct rcu_drain *event = (struct rcu_drain *)node;
    qemu_event_set(&event->drain_complete_event);
}

/*
 * This function ensures that all pending RCU callbacks registered on
 * the current thread are done executing.
 *
 * It drops the big QEMU lock during the wait, to allow the RCU thread
 * to process the callbacks.
 */

void drain_call_rcu(void)
{
    struct rcu_drain rcu_drain;
    bool locked = qemu_mutex_iothread_locked();

    memset(&rcu_drain, 0, sizeof(struct rcu_drain));
    qemu_event_init(&rcu_drain.drain_complete_event, false);

    if (locked) {
        qemu_mutex_unlock_iothread();
    }

    /*
     * RCU callbacks are invoked in the same order in which they are
     * registered, thus we can be sure that when 'drain_rcu_callback'
     * is called, all RCU callbacks that were registered on this thread
     * prior to calling this function are completed.
     *
     * Note that since we have only one global queue of the RCU callbacks,
     * we also end up waiting for most of the RCU callbacks that were
     * registered on other threads, but this is a side effect that should
     * not be relied upon.
     */

    qatomic_inc(&in_drain_call_rcu);
    call_rcu1(&rcu_drain.rcu, drain_rcu_callback);
    qemu_event_wait(&rcu_drain.drain_complete_event);
    qatomic_dec(&in_drain_call_rcu);

    if (locked) {
        qemu_mutex_lock_iothread();
    }
}
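
/* As an illustrative example (hypothetical names), drain_call_rcu() is
 * useful when an object must be gone, not merely unreachable, before
 * the caller proceeds:
 *
 *     QLIST_REMOVE_RCU(dev, node);       // unpublish
 *     call_rcu(dev, device_free, rcu);   // schedule reclamation
 *     drain_call_rcu();                  // device_free has run by now
 */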

void rcu_register_thread(void)
{
    assert(rcu_reader.ctr == 0);
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_INSERT_HEAD(&registry, &rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_unregister_thread(void)
{
    qemu_mutex_lock(&rcu_registry_lock);
    QLIST_REMOVE(&rcu_reader, node);
    qemu_mutex_unlock(&rcu_registry_lock);
}
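
/* A sketch of the expected lifecycle for a reader thread (hypothetical
 * example):
 *
 *     static void *worker_thread(void *opaque)
 *     {
 *         rcu_register_thread();
 *         while (!should_quit(opaque)) {
 *             rcu_read_lock();
 *             ... access RCU-protected data ...
 *             rcu_read_unlock();
 *         }
 *         rcu_unregister_thread();
 *         return NULL;
 *     }
 *
 * The call_rcu thread above registers itself the same way, and the
 * thread calling rcu_init_complete() is registered there.  A thread
 * that never registers is simply invisible to synchronize_rcu().
 */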

void rcu_add_force_rcu_notifier(Notifier *n)
{
    qemu_mutex_lock(&rcu_registry_lock);
    notifier_list_add(&rcu_reader.force_rcu, n);
    qemu_mutex_unlock(&rcu_registry_lock);
}

void rcu_remove_force_rcu_notifier(Notifier *n)
{
    qemu_mutex_lock(&rcu_registry_lock);
    notifier_remove(n);
    qemu_mutex_unlock(&rcu_registry_lock);
}
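
/* The force_rcu notifiers fire in wait_for_readers() while
 * drain_call_rcu() is in flight and a reader is slow to reach a
 * quiescent state; a notifier is expected to nudge its thread out of
 * the read-side critical section.  An illustrative sketch (hypothetical
 * callback name):
 *
 *     static void my_force_rcu(Notifier *n, void *data)
 *     {
 *         // e.g. kick this vCPU out of its execution loop
 *     }
 *
 *     Notifier force_rcu = { .notify = my_force_rcu };
 *     rcu_add_force_rcu_notifier(&force_rcu);
 *
 * Both functions act on the calling thread's rcu_reader, so they must
 * run on the thread whose notifier list is being changed.
 */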

static void rcu_init_complete(void)
{
    QemuThread thread;

    qemu_mutex_init(&rcu_registry_lock);
    qemu_mutex_init(&rcu_sync_lock);
    qemu_event_init(&rcu_gp_event, true);

    qemu_event_init(&rcu_call_ready_event, false);

    /* The caller is assumed to have iothread lock, so the call_rcu thread
     * must have been quiescent even after forking, just recreate it.
     */
    qemu_thread_create(&thread, "call_rcu", call_rcu_thread,
                       NULL, QEMU_THREAD_DETACHED);

    rcu_register_thread();
}

static int atfork_depth = 1;

void rcu_enable_atfork(void)
{
    atfork_depth++;
}

void rcu_disable_atfork(void)
{
    atfork_depth--;
}

#ifdef CONFIG_POSIX
static void rcu_init_lock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_lock(&rcu_sync_lock);
    qemu_mutex_lock(&rcu_registry_lock);
}

static void rcu_init_unlock(void)
{
    if (atfork_depth < 1) {
        return;
    }

    qemu_mutex_unlock(&rcu_registry_lock);
    qemu_mutex_unlock(&rcu_sync_lock);
}

static void rcu_init_child(void)
{
    if (atfork_depth < 1) {
        return;
    }

    memset(&registry, 0, sizeof(registry));
    rcu_init_complete();
}
#endif

static void __attribute__((__constructor__)) rcu_init(void)
{
    smp_mb_global_init();
#ifdef CONFIG_POSIX
    pthread_atfork(rcu_init_lock, rcu_init_unlock, rcu_init_child);
#endif
    rcu_init_complete();
}
 456