linux/kernel/rcu/tiny.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>
#include <linux/mm.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	unsigned long gp_seq;		/* Grace-period counter. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
	.gp_seq		= 0 - 300UL,
};
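
/*
 * The callbacks form a single NULL-terminated list headed by ->rcucblist.
 * ->donetail points to the ->next pointer of the last callback whose grace
 * period has already elapsed, and ->curtail points to the ->next pointer of
 * the last callback overall, so everything queued after ->donetail is still
 * waiting for a quiescent state.  When both tail pointers reference
 * &rcu_ctrlblk.rcucblist, no callbacks are queued at all.
 */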

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);

/* Record an rcu quiescent state.  */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	WRITE_ONCE(rcu_ctrlblk.gp_seq, rcu_ctrlblk.gp_seq + 1);
	local_irq_restore(flags);
}

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kvfree_rcu_offset(offset)) {
		trace_rcu_invoke_kvfree_callback("", head, offset);
		kvfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
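
/*
 * Illustrative sketch: kvfree_rcu() stores the byte offset of the rcu_head
 * within its enclosing object in ->func instead of a real callback pointer.
 * __is_kvfree_rcu_offset() above distinguishes such small offsets from
 * genuine function addresses, and rcu_reclaim_tiny() then recovers the
 * object pointer and kvfree()s it.  The structure and helper names below
 * are hypothetical.
 */
struct tiny_kvfree_example {
	int payload;
	struct rcu_head rh;	/* Offset of this field ends up in ->func. */
};

static void __maybe_unused tiny_kvfree_example_release(struct tiny_kvfree_example *p)
{
	kvfree_rcu(p, rh);	/* Freed after a grace period, no callback needed. */
}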

/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
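
/*
 * Illustrative sketch: the classic updater pattern that synchronize_rcu()
 * supports.  The old object is unpublished under a lock, a grace period is
 * waited for so that all pre-existing readers have finished, and only then
 * is the memory freed.  The structure, pointer, and lock names below are
 * hypothetical.
 */
struct tiny_sync_example {
	int key;
};

static struct tiny_sync_example __rcu *tiny_sync_example_ptr;
static DEFINE_SPINLOCK(tiny_sync_example_lock);

static int __maybe_unused tiny_sync_example_read(void)
{
	struct tiny_sync_example *p;
	int key = -1;

	rcu_read_lock();
	p = rcu_dereference(tiny_sync_example_ptr);
	if (p)
		key = p->key;
	rcu_read_unlock();
	return key;
}

static void __maybe_unused tiny_sync_example_replace(struct tiny_sync_example *newp)
{
	struct tiny_sync_example *oldp;

	spin_lock(&tiny_sync_example_lock);
	oldp = rcu_dereference_protected(tiny_sync_example_ptr,
					 lockdep_is_held(&tiny_sync_example_lock));
	rcu_assign_pointer(tiny_sync_example_ptr, newp);
	spin_unlock(&tiny_sync_example_lock);

	synchronize_rcu();	/* Wait for pre-existing readers to finish. */
	kfree(oldp);		/* No reader can reach *oldp any longer. */
}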

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
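
/*
 * Illustrative sketch: the asynchronous counterpart of the pattern above.
 * Instead of blocking in synchronize_rcu(), the updater hands the object
 * to call_rcu() along with a callback that uses container_of() to recover
 * the enclosing structure and free it once the grace period has elapsed.
 * The structure and function names below are hypothetical.
 */
struct tiny_call_rcu_example {
	int payload;
	struct rcu_head rh;
};

static void tiny_call_rcu_example_cb(struct rcu_head *head)
{
	struct tiny_call_rcu_example *p =
		container_of(head, struct tiny_call_rcu_example, rh);

	kfree(p);	/* Runs from rcu_process_callbacks() above. */
}

static void __maybe_unused tiny_call_rcu_example_free(struct tiny_call_rcu_example *p)
{
	call_rcu(&p->rh, tiny_call_rcu_example_cb);	/* Returns immediately. */
}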

/*
 * Return a grace-period-counter "cookie".  For more information,
 * see the Tree RCU header comment.
 */
unsigned long get_state_synchronize_rcu(void)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_rcu);

/*
 * Return a grace-period-counter "cookie" and ensure that a future grace
 * period completes.  For more information, see the Tree RCU header comment.
 */
unsigned long start_poll_synchronize_rcu(void)
{
	unsigned long gp_seq = get_state_synchronize_rcu();

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
	return gp_seq;
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_rcu);

/*
 * Return true if the grace period corresponding to oldstate has completed
 * and false otherwise.  For more information, see the Tree RCU header
 * comment.
 */
bool poll_state_synchronize_rcu(unsigned long oldstate)
{
	return READ_ONCE(rcu_ctrlblk.gp_seq) != oldstate;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_rcu);
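
/*
 * Illustrative sketch: using the polled grace-period API.  A cookie is
 * taken with start_poll_synchronize_rcu() when an object becomes stale,
 * and poll_state_synchronize_rcu() later reports whether a full grace
 * period has elapsed since then, letting the caller reclaim memory without
 * blocking or queueing a callback.  The structure and field names below
 * are hypothetical.
 */
struct tiny_poll_example {
	unsigned long gp_cookie;
	void *stale_data;
};

static void __maybe_unused tiny_poll_example_retire(struct tiny_poll_example *p, void *stale)
{
	p->stale_data = stale;
	p->gp_cookie = start_poll_synchronize_rcu();
}

static void __maybe_unused tiny_poll_example_gc(struct tiny_poll_example *p)
{
	if (p->stale_data && poll_state_synchronize_rcu(p->gp_cookie)) {
		kfree(p->stale_data);	/* Grace period has elapsed. */
		p->stale_data = NULL;
	}
}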

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
}