// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 *      tiny version for non-preemptible single-CPU use.
 *
 * Copyright (C) IBM Corporation, 2017
 *
 * Author: Paul McKenney <paulmck@linux.ibm.com>
 */

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/delay.h>
#include <linux/srcu.h>

#include <linux/rcu_node_tree.h>
#include "rcu_segcblist.h"
#include "rcu.h"

int rcu_scheduler_active __read_mostly;
static LIST_HEAD(srcu_boot_list);
static bool srcu_init_done;

static int init_srcu_struct_fields(struct srcu_struct *ssp)
{
        ssp->srcu_lock_nesting[0] = 0;
        ssp->srcu_lock_nesting[1] = 0;
        init_swait_queue_head(&ssp->srcu_wq);
        ssp->srcu_cb_head = NULL;
        ssp->srcu_cb_tail = &ssp->srcu_cb_head;
        ssp->srcu_gp_running = false;
        ssp->srcu_gp_waiting = false;
        ssp->srcu_idx = 0;
        INIT_WORK(&ssp->srcu_work, srcu_drive_gp);
        INIT_LIST_HEAD(&ssp->srcu_work.entry);
        return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
                       struct lock_class_key *key)
{
        /* Don't re-initialize a lock while it is held. */
        debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
        lockdep_init_map(&ssp->dep_map, name, key, 0);
        return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
        return init_srcu_struct_fields(ssp);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
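
/*
 * Illustrative usage sketch (not part of this file): one typical way to
 * set up an SRCU domain.  The names "my_srcu" and my_init() are
 * hypothetical.  Statically allocated domains can instead be created
 * with DEFINE_SRCU() or DEFINE_STATIC_SRCU(), which need no run-time
 * initialization call.
 *
 *      static struct srcu_struct my_srcu;
 *
 *      static int __init my_init(void)
 *      {
 *              return init_srcu_struct(&my_srcu);
 *      }
 */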

/*
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
        WARN_ON(ssp->srcu_lock_nesting[0] || ssp->srcu_lock_nesting[1]);
        flush_work(&ssp->srcu_work);
        WARN_ON(ssp->srcu_gp_running);
        WARN_ON(ssp->srcu_gp_waiting);
        WARN_ON(ssp->srcu_cb_head);
        WARN_ON(&ssp->srcu_cb_head != ssp->srcu_cb_tail);
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
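
/*
 * Illustrative teardown sketch (my_exit() and my_srcu are hypothetical):
 * all readers must have finished and all queued callbacks must have been
 * invoked before cleanup, otherwise the WARN_ON()s above will fire.
 * One conservative way to ensure that is an srcu_barrier() beforehand.
 *
 *      static void my_exit(void)
 *      {
 *              srcu_barrier(&my_srcu);         // wait for pending callbacks
 *              cleanup_srcu_struct(&my_srcu);  // then release the domain
 *      }
 */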

/*
 * Removes the count for the old reader from the appropriate element of
 * the srcu_struct.
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
        int newval = READ_ONCE(ssp->srcu_lock_nesting[idx]) - 1;

        WRITE_ONCE(ssp->srcu_lock_nesting[idx], newval);
        if (!newval && READ_ONCE(ssp->srcu_gp_waiting))
                swake_up_one(&ssp->srcu_wq);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
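
/*
 * Illustrative reader sketch (my_srcu, my_data, and do_something_with()
 * are hypothetical): readers normally go through the srcu_read_lock()/
 * srcu_read_unlock() wrappers, which supply the index decremented above.
 *
 *      int idx;
 *      struct foo *p;
 *
 *      idx = srcu_read_lock(&my_srcu);
 *      p = srcu_dereference(my_data, &my_srcu);
 *      if (p)
 *              do_something_with(p);   // may sleep, unlike plain RCU
 *      srcu_read_unlock(&my_srcu, idx);
 */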

/*
 * Workqueue handler to drive one grace period and invoke any callbacks
 * that become ready as a result.  Single-CPU and !PREEMPTION operation
 * means that we get away with murder on synchronization.  ;-)
 */
void srcu_drive_gp(struct work_struct *wp)
{
        int idx;
        struct rcu_head *lh;
        struct rcu_head *rhp;
        struct srcu_struct *ssp;

        ssp = container_of(wp, struct srcu_struct, srcu_work);
        if (ssp->srcu_gp_running || !READ_ONCE(ssp->srcu_cb_head))
                return; /* Already running or nothing to do. */

        /* Remove recently arrived callbacks and wait for readers. */
        WRITE_ONCE(ssp->srcu_gp_running, true);
        local_irq_disable();
        lh = ssp->srcu_cb_head;
        ssp->srcu_cb_head = NULL;
        ssp->srcu_cb_tail = &ssp->srcu_cb_head;
        local_irq_enable();
        idx = ssp->srcu_idx;
        WRITE_ONCE(ssp->srcu_idx, !ssp->srcu_idx);
        WRITE_ONCE(ssp->srcu_gp_waiting, true);  /* srcu_read_unlock() wakes! */
        swait_event_exclusive(ssp->srcu_wq, !READ_ONCE(ssp->srcu_lock_nesting[idx]));
        WRITE_ONCE(ssp->srcu_gp_waiting, false); /* srcu_read_unlock() cheap. */

        /* Invoke the callbacks we removed above. */
        while (lh) {
                rhp = lh;
                lh = lh->next;
                local_bh_disable();
                rhp->func(rhp);
                local_bh_enable();
        }

        /*
         * Enable rescheduling, and if there are more callbacks,
         * reschedule ourselves.  This can race with a call_srcu()
         * at interrupt level, but the ->srcu_gp_running checks will
         * straighten that out.
         */
        WRITE_ONCE(ssp->srcu_gp_running, false);
        if (READ_ONCE(ssp->srcu_cb_head))
                schedule_work(&ssp->srcu_work);
}
EXPORT_SYMBOL_GPL(srcu_drive_gp);
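
/*
 * Illustrative timeline (a sketch, not authoritative): why flipping
 * ->srcu_idx before waiting suffices.  Suppose reader R1 started before
 * the flip and reader R2 starts after it:
 *
 *      srcu_drive_gp()                 readers
 *      ---------------                 -------
 *      snapshot callback list
 *      idx = ->srcu_idx (== 0)         R1 running, counted in nesting[0]
 *      ->srcu_idx = 1
 *                                      R2 starts, counted in nesting[1]
 *      wait for nesting[0] == 0        R1 finishes, wakes us
 *      invoke callbacks                R2 still running, harmlessly
 *
 * R2 cannot be using anything the callbacks free, because R2 began
 * after the call_srcu() invocations that queued them, by which time
 * the updater had already unpublished the old state.
 */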

/*
 * Enqueue an SRCU callback on the specified srcu_struct structure,
 * initiating grace-period processing if it is not already running.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
               rcu_callback_t func)
{
        unsigned long flags;

        rhp->func = func;
        rhp->next = NULL;
        local_irq_save(flags);
        *ssp->srcu_cb_tail = rhp;
        ssp->srcu_cb_tail = &rhp->next;
        local_irq_restore(flags);
        if (!READ_ONCE(ssp->srcu_gp_running)) {
                if (likely(srcu_init_done))
                        schedule_work(&ssp->srcu_work);
                else if (list_empty(&ssp->srcu_work.entry))
                        /* Too early for workqueues: defer to srcu_init(). */
                        list_add(&ssp->srcu_work.entry, &srcu_boot_list);
        }
}
EXPORT_SYMBOL_GPL(call_srcu);
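
/*
 * Illustrative callback sketch (struct foo and my_free_cb() are
 * hypothetical): the usual pattern embeds an rcu_head in the protected
 * structure and recovers the enclosing object with container_of() in
 * the callback.
 *
 *      struct foo {
 *              int data;
 *              struct rcu_head rh;
 *      };
 *
 *      static void my_free_cb(struct rcu_head *rhp)
 *      {
 *              kfree(container_of(rhp, struct foo, rh));
 *      }
 *
 *      // After unpublishing p from all readers' view:
 *      call_srcu(&my_srcu, &p->rh, my_free_cb);
 */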

/*
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
        struct rcu_synchronize rs;

        init_rcu_head_on_stack(&rs.head);
        init_completion(&rs.completion);
        call_srcu(ssp, &rs.head, wakeme_after_rcu);
        wait_for_completion(&rs.completion);
        destroy_rcu_head_on_stack(&rs.head);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
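
/*
 * Illustrative updater sketch (my_data, my_srcu, and my_lock are
 * hypothetical): unpublish the old structure, wait for all pre-existing
 * SRCU readers, then free it.
 *
 *      struct foo *old;
 *
 *      old = rcu_dereference_protected(my_data,
 *                                      lockdep_is_held(&my_lock));
 *      rcu_assign_pointer(my_data, NULL);
 *      synchronize_srcu(&my_srcu);     // all prior readers are done
 *      kfree(old);
 */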

/* Lockdep diagnostics.  */
void __init rcu_scheduler_starting(void)
{
        rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

/*
 * Queue work for srcu_struct structures with early boot callbacks.
 * The work won't actually execute until the workqueue initialization
 * phase that takes place after the scheduler starts.
 */
void __init srcu_init(void)
{
        struct srcu_struct *ssp;

        srcu_init_done = true;
        while (!list_empty(&srcu_boot_list)) {
                ssp = list_first_entry(&srcu_boot_list,
                                      struct srcu_struct, srcu_work.entry);
                list_del_init(&ssp->srcu_work.entry);
                schedule_work(&ssp->srcu_work);
        }
}