linux/kernel/rcu/tiny.c
// SPDX-License-Identifier: GPL-2.0+
/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition.
 *
 * Copyright IBM Corporation, 2008
 *
 * Author: Paul E. McKenney <paulmck@linux.ibm.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU
 */
#include <linux/completion.h>
#include <linux/interrupt.h>
#include <linux/notifier.h>
#include <linux/rcupdate_wait.h>
#include <linux/kernel.h>
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/sched.h>
#include <linux/types.h>
#include <linux/init.h>
#include <linux/time.h>
#include <linux/cpu.h>
#include <linux/prefetch.h>
#include <linux/slab.h>

#include "rcu.h"

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
};

/* Definition for rcupdate control block. */
static struct rcu_ctrlblk rcu_ctrlblk = {
	.donetail	= &rcu_ctrlblk.rcucblist,
	.curtail	= &rcu_ctrlblk.rcucblist,
};
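
/*
 * The callback list is singly linked, with two tail pointers partitioning
 * it into a "done" segment (grace period already elapsed, ready to invoke)
 * followed by a "waiting" segment.  On an empty list, both tail pointers
 * refer back to ->rcucblist itself:
 *
 *	rcucblist --> [done CBs] --> [waiting CBs] --> NULL
 *	                         ^                 ^
 *	                     donetail           curtail
 */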

void rcu_barrier(void)
{
	wait_rcu_gp(call_rcu);
}
EXPORT_SYMBOL(rcu_barrier);
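
/*
 * wait_rcu_gp(call_rcu) posts a fresh callback through call_rcu() and
 * blocks on a completion until that callback is invoked.  Because Tiny
 * RCU's single callback list is strictly FIFO, every callback queued
 * before rcu_barrier() was called has run by the time it returns.
 */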

/* Record an RCU quiescent state.  */
void rcu_qs(void)
{
	unsigned long flags;

	local_irq_save(flags);
	if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		rcu_ctrlblk.donetail = rcu_ctrlblk.curtail;
		raise_softirq_irqoff(RCU_SOFTIRQ);
	}
	local_irq_restore(flags);
}
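
/*
 * In Tiny RCU, rcu_qs() also runs on every context switch: the
 * rcu_note_context_switch() wrapper in include/linux/rcutiny.h expands
 * to a call to rcu_qs(), so any pass through schedule() ends the
 * current grace period.
 */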

/*
 * Check to see if the scheduling-clock interrupt came from an extended
 * quiescent state, and, if so, tell RCU about it.  This function must
 * be called from hardirq context.  It is normally called from the
 * scheduling-clock interrupt.
 */
void rcu_sched_clock_irq(int user)
{
	if (user) {
		rcu_qs();
	} else if (rcu_ctrlblk.donetail != rcu_ctrlblk.curtail) {
		set_tsk_need_resched(current);
		set_preempt_need_resched();
	}
}

/*
 * Reclaim the specified callback, either by invoking it for non-kfree cases or
 * freeing it directly (for kfree). Return true if kfreeing, false otherwise.
 */
static inline bool rcu_reclaim_tiny(struct rcu_head *head)
{
	rcu_callback_t f;
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		trace_rcu_invoke_kfree_callback("", head, offset);
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	}

	trace_rcu_invoke_callback("", head);
	f = head->func;
	WRITE_ONCE(head->func, (rcu_callback_t)0L);
	f(head);
	rcu_lock_release(&rcu_callback_map);
	return false;
}
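
/*
 * The kfree path above works because kfree_rcu(ptr, field) stores
 * offsetof(typeof(*ptr), field) -- a small integer -- in ->func instead
 * of a real function pointer, and __is_kfree_rcu_offset() tells the two
 * apart by magnitude.  Illustrative sketch (not part of this file;
 * "struct foo" is a hypothetical example type):
 */
#if 0	/* example only */
struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_drop(struct foo *fp)
{
	/* Roughly: call_rcu(&fp->rh, (rcu_callback_t)offsetof(struct foo, rh)). */
	kfree_rcu(fp, rh);
}
#endif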

/* Invoke the RCU callbacks whose grace period has elapsed.  */
static __latent_entropy void rcu_process_callbacks(struct softirq_action *unused)
{
	struct rcu_head *next, *list;
	unsigned long flags;

	/* Move the ready-to-invoke callbacks to a local list. */
	local_irq_save(flags);
	if (rcu_ctrlblk.donetail == &rcu_ctrlblk.rcucblist) {
		/* No callbacks ready, so just leave. */
		local_irq_restore(flags);
		return;
	}
	list = rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.rcucblist = *rcu_ctrlblk.donetail;
	*rcu_ctrlblk.donetail = NULL;
	if (rcu_ctrlblk.curtail == rcu_ctrlblk.donetail)
		rcu_ctrlblk.curtail = &rcu_ctrlblk.rcucblist;
	rcu_ctrlblk.donetail = &rcu_ctrlblk.rcucblist;
	local_irq_restore(flags);

	/* Invoke the callbacks on the local list. */
	while (list) {
		next = list->next;
		prefetch(next);
		debug_rcu_head_unqueue(list);
		local_bh_disable();
		rcu_reclaim_tiny(list);
		local_bh_enable();
		list = next;
	}
}
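
/*
 * The splice above detaches everything up to *donetail for invocation,
 * makes any still-waiting callbacks the new list head, and resets the
 * tail pointers so the control block is self-consistent again before
 * interrupts are re-enabled.  Each callback then runs with softirqs
 * disabled, matching the environment RCU callbacks are normally
 * promised.
 */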

/*
 * Wait for a grace period to elapse.  But it is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section.
 * Therefore, any legal call to synchronize_rcu() is a quiescent
 * state, and so on a UP system, synchronize_rcu() need do nothing.
 * (But Lai Jiangshan points out the benefits of doing might_sleep()
 * to reduce latency.)
 *
 * Cool, huh?  (Due to Josh Triplett.)
 */
void synchronize_rcu(void)
{
	RCU_LOCKDEP_WARN(lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_rcu() in RCU read-side critical section");
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
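
/*
 * Classic updater pattern built on synchronize_rcu().  Illustrative
 * sketch only (not part of this file; "struct foo", "foo_lock", and
 * the list are hypothetical):
 */
#if 0	/* example only */
static DEFINE_SPINLOCK(foo_lock);

struct foo {
	struct list_head list;
	int data;
};

static void foo_remove(struct foo *fp)
{
	spin_lock(&foo_lock);
	list_del_rcu(&fp->list);	/* Unlink; readers may still see fp. */
	spin_unlock(&foo_lock);
	synchronize_rcu();		/* Wait out all pre-existing readers. */
	kfree(fp);			/* Now no reader can hold a reference. */
}
#endif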

/*
 * Post an RCU callback to be invoked after the end of an RCU grace
 * period.  But since we have but one CPU, that would be after any
 * quiescent state.
 */
void call_rcu(struct rcu_head *head, rcu_callback_t func)
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_ctrlblk.curtail = head;
	rcu_ctrlblk.curtail = &head->next;
	local_irq_restore(flags);

	if (unlikely(is_idle_task(current))) {
		/* force scheduling for rcu_qs() */
		resched_cpu(0);
	}
}
EXPORT_SYMBOL_GPL(call_rcu);
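
/*
 * Typical call_rcu() usage: embed an rcu_head in the protected object
 * and free the enclosing structure from the callback.  Illustrative
 * sketch only (not part of this file; "struct foo" and its helpers are
 * hypothetical):
 */
#if 0	/* example only */
struct foo {
	int data;
	struct rcu_head rh;
};

static void foo_free_rcu(struct rcu_head *rhp)
{
	struct foo *fp = container_of(rhp, struct foo, rh);

	kfree(fp);
}

static void foo_release(struct foo *fp)
{
	/* Defer the free until after a grace period has elapsed. */
	call_rcu(&fp->rh, foo_free_rcu);
}
#endif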

void __init rcu_init(void)
{
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	rcu_early_boot_tests();
	srcu_init();
}