linux/kernel/rcu/tiny.c
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *              Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
        .donetail       = &rcu_ctrlblk.rcucblist,
        .curtail        = &rcu_ctrlblk.rcucblist,
};
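
/*
 * Illustrative sketch (not part of the original file): the single
 * callback list is partitioned by the two tail pointers.  With
 * callbacks A and B already "done" and C still waiting for a
 * quiescent state:
 *
 *      rcucblist --> A --> B --> C --> NULL
 *                          ^           ^
 *                      donetail     curtail
 *
 * Here donetail is &B->next (B is the last done callback) and curtail
 * is &C->next (C is the last queued callback), so rcu_qs() marks C as
 * done simply by setting donetail = curtail.
 */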

/* Wait until all in-flight call_rcu() callbacks have been invoked. */
void rcu_barrier(void)
{
        wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

/* Record an RCU quiescent state.  */
void rcu_qs(void)
{
        unsigned long flags;

        local_irq_save(flags);
        if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
                rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
                raise_softirq_irqoff(RCU_SOFTIRQ);
        }
        local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
        if (user) {
                rcu_qs();
        } else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
                set_tsk_need_resched(current);
                set_preempt_need_resched();
        }
}

/*
 * Reclaim the specified callback, either by invoking it (for the
 * non-kvfree case) or freeing it directly (for the kvfree case).
 * Return true if kvfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
        rcu_callback_t f;
        unsigned long offset = (unsigned long)head->func;

        rcu_lock_acquire(&rcu_callback_map);
        if (__is_kvfree_rcu_offset(offset)) {
                trace_rcu_invoke_kvfree_callback("", head, offset);
                kvfree((void *)head - offset);
                rcu_lock_release(&rcu_callback_map);
                return true;
        }

        trace_rcu_invoke_callback("", head);
        f = head->func;
        WRITE_ONCE(head->func, (rcu_callback_t)0L);
        f(head);
        rcu_lock_release(&rcu_callback_map);
        return false;
}
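
/*
 * Illustrative sketch (not part of the original file): kvfree_rcu()
 * stores the offset of the rcu_head within its enclosing structure in
 * ->func instead of a real callback pointer, which is exactly what
 * rcu_reclaim_tiny() decodes above.  struct foo is an assumed name
 * for this example:
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rh;
 *      };
 *
 *      struct foo *p = kzalloc(sizeof(*p), GFP_KERNEL);
 *      ...
 *      kvfree_rcu(p, rh);      // ->func becomes offsetof(struct foo, rh)
 *
 * Such offsets are small (less than 4096), so __is_kvfree_rcu_offset()
 * can distinguish them from genuine function pointers.
 */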

/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
        struct rcu_head *next, *list;
        unsigned long flags;

        /* Move the ready-to-invoke callbacks to a local list. */
        local_irq_save(flags);
        if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
                /* No callbacks ready, so just leave. */
                local_irq_restore(flags);
                return;
        }
        list = rcu_ctrlblk.rcucblist;
        rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
        *rcu_ctrlblk.donetail = NULL;
        if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
                rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
        rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
        local_irq_restore(flags);

        /* Invoke the callbacks on the local list. */
        while (list) {
                next = list->next;
                prefetch(next);
                debug_rcu_head_unqueue(list);
                local_bh_disable();
                rcu_reclaim_tiny(list);
                local_bh_enable();
                list = next;
        }
}
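
/*
 * Illustrative sketch (not part of the original file): with done
 * callbacks A and B and a still-pending C, the splice above yields
 *
 *      before: rcucblist --> A --> B --> C,  donetail = &B->next
 *      after:  list      --> A --> B --> NULL   (invoked locally)
 *              rcucblist --> C,  donetail = &rcucblist
 *
 * so interrupts need be disabled only for the pointer updates, never
 * while the callbacks themselves run.
 */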

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
        RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
                         lock_is_held(&rcu_lock_map) ||
                         lock_is_held(&rcu_sched_lock_map),
                         "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
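
/*
 * Illustrative sketch (not part of the original file): the classic
 * update-side pattern that synchronize_rcu() supports.  gp, gp_lock,
 * new_p, and struct foo are assumed names for this example:
 *
 *      struct foo *old_p = rcu_dereference_protected(gp,
 *                                      lockdep_is_held(&gp_lock));
 *      rcu_assign_pointer(gp, new_p);
 *      synchronize_rcu();      // wait for pre-existing readers to finish
 *      kfree(old_p);
 *
 * On this UP build, the synchronize_rcu() call compiles down to little
 * more than the lockdep check above.
 */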

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcu_ctrlblk.curtail = head;
        rcu_ctrlblk.curtail = &head->next;
        local_irq_restore(flags);

        if (unlikely(is_idle_task(current))) {
                /* force scheduling for rcu_qs() */
                resched_cpu(0);
        }
}
EXPORT_SYMBOL_GPL(call_rcu);
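
/*
 * Illustrative sketch (not part of the original file): a typical caller
 * embeds an rcu_head in its own structure and recovers the enclosing
 * object with container_of().  struct foo and foo_reclaim() are assumed
 * names for this example:
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rh;
 *      };
 *
 *      static void foo_reclaim(struct rcu_head *rh)
 *      {
 *              kfree(container_of(rh, struct foo, rh));
 *      }
 *      ...
 *      call_rcu(&p->rh, foo_reclaim);  // p points to a struct foo
 */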

/*
 * Boot-time setup: register the softirq handler that invokes ready
 * callbacks, run the early-boot self tests, and initialize SRCU.
 */
void __init rcu_init(void)
{
        open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
        rcu_early_boot_tests();
        srcu_init();
}