linux/kernel/rcu/sync.c
/*
 * RCU-based infrastructure for lightweight reader-writer locking
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (c) 2015, Red Hat, Inc.
 *
 * Author: Oleg Nesterov <oleg@redhat.com>
 */

#include <linux/rcu_sync.h>
#include <linux/sched.h>

#ifdef CONFIG_PROVE_RCU
#define __INIT_HELD(func)       .held = func,
#else
#define __INIT_HELD(func)
#endif
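/*
 * Per-flavor grace-period operations: each rcu_sync structure records
 * its flavor in ->gp_type, which indexes this table to select the
 * matching synchronize_*(), call_*(), and *_barrier() primitives.
 */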
static const struct {
        void (*sync)(void);
        void (*call)(struct rcu_head *, void (*)(struct rcu_head *));
        void (*wait)(void);
#ifdef CONFIG_PROVE_RCU
        int  (*held)(void);
#endif
} gp_ops[] = {
        [RCU_SYNC] = {
                .sync = synchronize_rcu,
                .call = call_rcu,
                .wait = rcu_barrier,
                __INIT_HELD(rcu_read_lock_held)
        },
        [RCU_SCHED_SYNC] = {
                .sync = synchronize_sched,
                .call = call_rcu_sched,
                .wait = rcu_barrier_sched,
                __INIT_HELD(rcu_read_lock_sched_held)
        },
        [RCU_BH_SYNC] = {
                .sync = synchronize_rcu_bh,
                .call = call_rcu_bh,
                .wait = rcu_barrier_bh,
                __INIT_HELD(rcu_read_lock_bh_held)
        },
};

enum { GP_IDLE = 0, GP_PENDING, GP_PASSED };
enum { CB_IDLE = 0, CB_PENDING, CB_REPLAY };
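/*
 * ->gp_state: readers may use their fastpaths (GP_IDLE), a grace period
 * is being waited for (GP_PENDING), or one has elapsed while updaters
 * remain active (GP_PASSED).  ->cb_state: no rcu_sync_func() callback
 * is queued (CB_IDLE), one is queued (CB_PENDING), or one is queued and
 * a later rcu_sync_exit() has asked for it to be requeued (CB_REPLAY).
 */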

#define rss_lock        gp_wait.lock

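/*
 * rcu_sync_lockdep_assert() backs the rcu_sync_is_idle() fastpath check
 * declared in linux/rcu_sync.h: under CONFIG_PROVE_RCU it verifies that
 * the caller holds this flavor's read-side lock.
 */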
#ifdef CONFIG_PROVE_RCU
void rcu_sync_lockdep_assert(struct rcu_sync *rsp)
{
        RCU_LOCKDEP_WARN(!gp_ops[rsp->gp_type].held(),
                         "suspicious rcu_sync_is_idle() usage");
}
EXPORT_SYMBOL_GPL(rcu_sync_lockdep_assert);
#endif

/**
 * rcu_sync_init() - Initialize an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be initialized
 * @type: Flavor of RCU with which to synchronize rcu_sync structure
 */
void rcu_sync_init(struct rcu_sync *rsp, enum rcu_sync_type type)
{
        memset(rsp, 0, sizeof(*rsp));
        init_waitqueue_head(&rsp->gp_wait);
        rsp->gp_type = type;
}
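/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * reader using rcu_sync_is_idle() to decide between fastpath and
 * slowpath.  The names my_rss, my_slow_ctr, and my_reader() are made
 * up; the structure is assumed to have been set up with
 * rcu_sync_init(&my_rss, RCU_SYNC), so readers must hold
 * rcu_read_lock() across the check.
 */
#if 0
static struct rcu_sync my_rss;
static atomic_t my_slow_ctr;

static void my_reader(void)
{
        rcu_read_lock();
        if (!rcu_sync_is_idle(&my_rss))
                atomic_inc(&my_slow_ctr);       /* updater active: slowpath */
        /* ... read-side critical section ... */
        rcu_read_unlock();
}
#endif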

/**
 * rcu_sync_enter_start - Force readers onto slow path for multiple updates
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * Must be called after rcu_sync_init() and before first use.
 *
 * Ensures rcu_sync_is_idle() returns false and rcu_sync_{enter,exit}()
 * pairs turn into NO-OPs.
 */
void rcu_sync_enter_start(struct rcu_sync *rsp)
{
        rsp->gp_count++;
        rsp->gp_state = GP_PASSED;
}
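/*
 * Illustrative sketch, not part of the original file: starting life
 * with readers already forced onto the slowpath, e.g. during early
 * boot before any reader can run.  my_rss and my_early_init() are
 * hypothetical names.
 */
#if 0
static void __init my_early_init(void)
{
        rcu_sync_init(&my_rss, RCU_SCHED_SYNC);
        rcu_sync_enter_start(&my_rss);  /* no grace-period wait needed */
}
#endif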

/**
 * rcu_sync_enter() - Force readers onto slowpath
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who need readers to make use of
 * a slowpath during the update.  After this function returns, all
 * subsequent calls to rcu_sync_is_idle() will return false, which
 * tells readers to stay off their fastpaths.  A later call to
 * rcu_sync_exit() re-enables reader fastpaths.
 *
 * When called in isolation, rcu_sync_enter() must wait for a grace
 * period.  However, closely spaced calls to rcu_sync_enter() can
 * optimize away the grace-period wait via a state machine implemented
 * by rcu_sync_enter(), rcu_sync_exit(), and rcu_sync_func().
 */
void rcu_sync_enter(struct rcu_sync *rsp)
{
        bool need_wait, need_sync;

        spin_lock_irq(&rsp->rss_lock);
        need_wait = rsp->gp_count++;
        need_sync = rsp->gp_state == GP_IDLE;
        if (need_sync)
                rsp->gp_state = GP_PENDING;
        spin_unlock_irq(&rsp->rss_lock);

        BUG_ON(need_wait && need_sync);

        if (need_sync) {
                gp_ops[rsp->gp_type].sync();
                rsp->gp_state = GP_PASSED;
                wake_up_all(&rsp->gp_wait);
        } else if (need_wait) {
                wait_event(rsp->gp_wait, rsp->gp_state == GP_PASSED);
        } else {
                /*
                 * Possible when there's a pending CB from an rcu_sync_exit().
                 * Nobody has yet been allowed the 'fast' path and thus we can
                 * avoid doing any sync(). The callback will get 'dropped'.
                 */
                BUG_ON(rsp->gp_state != GP_PASSED);
        }
}
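/*
 * Illustrative sketch, not part of the original file: a hypothetical
 * updater bracketing its update with rcu_sync_enter()/rcu_sync_exit().
 * my_writer() and my_rss are made-up names.
 */
#if 0
static void my_writer(void)
{
        rcu_sync_enter(&my_rss);        /* may block for a grace period */
        /* ... update that fastpath readers must not race with ... */
        rcu_sync_exit(&my_rss);         /* never blocks; defers to call_rcu() */
}
#endif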

/**
 * rcu_sync_func() - Callback function managing reader access to fastpath
 * @rhp: Pointer to rcu_head in rcu_sync structure to use for synchronization
 *
 * This function is passed to one of the call_rcu() functions by
 * rcu_sync_exit(), so that it is invoked after a grace period following
 * that invocation of rcu_sync_exit().  It takes action based on events
 * that have taken place in the meantime, so that closely spaced
 * rcu_sync_enter() and rcu_sync_exit() pairs need not wait for a grace
 * period.
 *
 * If another rcu_sync_enter() is invoked before the grace period
 * ended, reset state to allow the next rcu_sync_exit() to let the
 * readers back onto their fastpaths (after a grace period).  If both
 * another rcu_sync_enter() and its matching rcu_sync_exit() are invoked
 * before the grace period ended, re-invoke call_rcu() on behalf of that
 * rcu_sync_exit().  Otherwise, set all state back to idle so that readers
 * can again use their fastpaths.
 */
static void rcu_sync_func(struct rcu_head *rhp)
{
        struct rcu_sync *rsp = container_of(rhp, struct rcu_sync, cb_head);
        unsigned long flags;

        BUG_ON(rsp->gp_state != GP_PASSED);
        BUG_ON(rsp->cb_state == CB_IDLE);

        spin_lock_irqsave(&rsp->rss_lock, flags);
        if (rsp->gp_count) {
                /*
                 * A new rcu_sync_enter() has happened; drop the callback.
                 */
                rsp->cb_state = CB_IDLE;
        } else if (rsp->cb_state == CB_REPLAY) {
                /*
                 * A new rcu_sync_exit() has happened; requeue the callback
                 * to catch a later GP.
                 */
                rsp->cb_state = CB_PENDING;
                gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
        } else {
                /*
                 * We're at least a GP after rcu_sync_exit(); everybody will
                 * now have observed the write side critical section. Let 'em
                 * rip!
                 */
                rsp->cb_state = CB_IDLE;
                rsp->gp_state = GP_IDLE;
        }
        spin_unlock_irqrestore(&rsp->rss_lock, flags);
}
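/*
 * Worked timeline (added for illustration): two closely spaced updater
 * pairs, showing how the replay path avoids a second synchronous wait:
 *
 *   rcu_sync_exit():  gp_count 1->0, CB_IDLE -> CB_PENDING, call_rcu()
 *   rcu_sync_enter(): gp_count 0->1, gp_state still GP_PASSED, no sync()
 *   rcu_sync_exit():  gp_count 1->0, CB_PENDING -> CB_REPLAY
 *   rcu_sync_func():  gp_count == 0, CB_REPLAY -> CB_PENDING, requeued
 *   rcu_sync_func():  gp_count == 0, CB_PENDING -> CB_IDLE, GP_IDLE
 *
 * Had the callback instead fired while gp_count was nonzero, it would
 * simply have been dropped (CB_IDLE) with gp_state left at GP_PASSED.
 */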

/**
 * rcu_sync_exit() - Allow readers back onto fast path after grace period
 * @rsp: Pointer to rcu_sync structure to use for synchronization
 *
 * This function is used by updaters who have completed, and can therefore
 * now allow readers to make use of their fastpaths after a grace period
 * has elapsed.  After this grace period has completed, all subsequent
 * calls to rcu_sync_is_idle() will return true, which tells readers that
 * they can once again use their fastpaths.
 */
void rcu_sync_exit(struct rcu_sync *rsp)
{
        spin_lock_irq(&rsp->rss_lock);
        if (!--rsp->gp_count) {
                if (rsp->cb_state == CB_IDLE) {
                        rsp->cb_state = CB_PENDING;
                        gp_ops[rsp->gp_type].call(&rsp->cb_head, rcu_sync_func);
                } else if (rsp->cb_state == CB_PENDING) {
                        rsp->cb_state = CB_REPLAY;
                }
        }
        spin_unlock_irq(&rsp->rss_lock);
}
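/*
 * Note that rcu_sync_exit() itself never blocks: re-enabling the reader
 * fastpath is deferred to rcu_sync_func() via call_rcu().  An enter/exit
 * pair that completes while a callback is still pending merely requests
 * a replay (CB_REPLAY) rather than queueing a second callback.
 */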

/**
 * rcu_sync_dtor() - Clean up an rcu_sync structure
 * @rsp: Pointer to rcu_sync structure to be cleaned up
 */
void rcu_sync_dtor(struct rcu_sync *rsp)
{
        int cb_state;

        BUG_ON(rsp->gp_count);

        spin_lock_irq(&rsp->rss_lock);
        if (rsp->cb_state == CB_REPLAY)
                rsp->cb_state = CB_PENDING;
        cb_state = rsp->cb_state;
        spin_unlock_irq(&rsp->rss_lock);

        if (cb_state != CB_IDLE) {
                gp_ops[rsp->gp_type].wait();
                BUG_ON(rsp->cb_state != CB_IDLE);
        }
}
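/*
 * Illustrative sketch, not part of the original file: tearing down a
 * hypothetical object that embeds an rcu_sync structure (struct my_obj
 * and my_obj_free() are made-up names).  All rcu_sync_enter() calls
 * must already be balanced by rcu_sync_exit() before this point.
 */
#if 0
static void my_obj_free(struct my_obj *obj)
{
        rcu_sync_dtor(&obj->rss);       /* wait out any pending callback */
        kfree(obj);                     /* safe: cb_head no longer queued */
}
#endif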