/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2012
 *
 * Author: Paul McKenney <paulmck@us.ibm.com>
 *	   Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of Read-Copy Update mechanism see -
 *		Documentation/RCU/ *.txt
 *
 */

#ifndef _LINUX_SRCU_H
#define _LINUX_SRCU_H

#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>

struct srcu_struct_array {
	unsigned long c[2];
	unsigned long seq[2];
};

struct rcu_batch {
	struct rcu_head *head, **tail;
};

#define RCU_BATCH_INIT(name) { NULL, &(name.head) }

struct srcu_struct {
	unsigned long completed;
	struct srcu_struct_array __percpu *per_cpu_ref;
	spinlock_t queue_lock; /* protect ->batch_queue, ->running */
	bool running;
	/* callbacks just queued */
	struct rcu_batch batch_queue;
	/* callbacks try to do the first check_zero */
	struct rcu_batch batch_check0;
	/* callbacks done with the first check_zero and the flip */
	struct rcu_batch batch_check1;
	struct rcu_batch batch_done;
	struct delayed_work work;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	struct lockdep_map dep_map;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key);

#define init_srcu_struct(sp) \
({ \
	static struct lock_class_key __srcu_key; \
	\
	__init_srcu_struct((sp), #sp, &__srcu_key); \
})

#define __SRCU_DEP_MAP_INIT(srcu_name)	.dep_map = { .name = #srcu_name },
#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

int init_srcu_struct(struct srcu_struct *sp);

#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

void process_srcu(struct work_struct *work);

#define __SRCU_STRUCT_INIT(name)					\
	{								\
		.completed = -300,					\
		.per_cpu_ref = &name##_srcu_array,			\
		.queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock),	\
		.running = false,					\
		.batch_queue = RCU_BATCH_INIT(name.batch_queue),	\
		.batch_check0 = RCU_BATCH_INIT(name.batch_check0),	\
		.batch_check1 = RCU_BATCH_INIT(name.batch_check1),	\
		.batch_done = RCU_BATCH_INIT(name.batch_done),		\
		.work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
		__SRCU_DEP_MAP_INIT(name)				\
	}

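/*
 * Run-time initialization sketch (illustrative only, not part of this
 * header).  The "my_device" structure and functions below are hypothetical;
 * the sketch only shows how init_srcu_struct() and cleanup_srcu_struct()
 * are expected to pair up for a dynamically initialized srcu_struct.  The
 * srcu_barrier() call matters only if call_srcu() was used on this domain.
 *
 *	struct my_device {
 *		struct srcu_struct srcu;
 *	};
 *
 *	static int my_device_setup(struct my_device *dev)
 *	{
 *		return init_srcu_struct(&dev->srcu);
 *	}
 *
 *	static void my_device_teardown(struct my_device *dev)
 *	{
 *		srcu_barrier(&dev->srcu);
 *		cleanup_srcu_struct(&dev->srcu);
 *	}
 */
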
/*
 * Define and initialize an srcu struct at build time.
 * Do not call init_srcu_struct() nor cleanup_srcu_struct() on it.
 */
#define __DEFINE_SRCU(name, is_static)					\
	static DEFINE_PER_CPU(struct srcu_struct_array, name##_srcu_array);\
	is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
#define DEFINE_SRCU(name)		__DEFINE_SRCU(name, /* not static */)
#define DEFINE_STATIC_SRCU(name)	__DEFINE_SRCU(name, static)

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct on which to queue the callback
 * @head: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *head,
	       void (*func)(struct rcu_head *head));

void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
void synchronize_srcu_expedited(struct srcu_struct *sp);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void srcu_barrier(struct srcu_struct *sp);

#ifdef CONFIG_DEBUG_LOCK_ALLOC

/**
 * srcu_read_lock_held - might we be in SRCU read-side critical section?
 * @sp: the srcu_struct whose read-side critical section might be held.
 *
 * If CONFIG_DEBUG_LOCK_ALLOC is selected, returns nonzero iff in an SRCU
 * read-side critical section.  In absence of CONFIG_DEBUG_LOCK_ALLOC,
 * this assumes we are in an SRCU read-side critical section unless it can
 * prove otherwise.
 *
 * Checks debug_lockdep_rcu_enabled() to prevent false positives during boot
 * and while lockdep is disabled.
 *
 * Note that SRCU is based on its own state machine and does not rely on
 * normal RCU, so it may be called from a CPU that RCU regards as idle or
 * even offline.
 */
static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	if (!debug_lockdep_rcu_enabled())
		return 1;
	return lock_is_held(&sp->dep_map);
}

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

static inline int srcu_read_lock_held(struct srcu_struct *sp)
{
	return 1;
}

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

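/*
 * Update-side usage sketch (illustrative only, not part of this header).
 * The "foo" names are hypothetical, and kfree() would additionally require
 * <linux/slab.h>.  The sketch shows a statically defined SRCU domain and a
 * call_srcu() callback that frees an object after a grace period:
 *
 *	DEFINE_STATIC_SRCU(foo_srcu);
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rcu;
 *	};
 *
 *	static void foo_reclaim(struct rcu_head *head)
 *	{
 *		kfree(container_of(head, struct foo, rcu));
 *	}
 *
 *	static void foo_retire(struct foo *fp)
 *	{
 *		call_srcu(&foo_srcu, &fp->rcu, foo_reclaim);
 *	}
 */
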
/**
 * srcu_dereference_check - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 * @c: condition to check for update-side use
 *
 * If PROVE_RCU is enabled, invoking this outside of an RCU read-side
 * critical section will result in an RCU-lockdep splat, unless @c evaluates
 * to 1.  The @c argument will normally be a logical expression containing
 * lockdep_is_held() calls.
 */
#define srcu_dereference_check(p, sp, c) \
	__rcu_dereference_check((p), srcu_read_lock_held(sp) || (c), __rcu)

/**
 * srcu_dereference - fetch SRCU-protected pointer for later dereferencing
 * @p: the pointer to fetch and protect for later dereferencing
 * @sp: pointer to the srcu_struct, which is used to check that we
 *	really are in an SRCU read-side critical section.
 *
 * Makes rcu_dereference_check() do the dirty work.  If PROVE_RCU
 * is enabled, invoking this outside of an RCU read-side critical
 * section will result in an RCU-lockdep splat.
 */
#define srcu_dereference(p, sp) srcu_dereference_check((p), (sp), 0)

/**
 * srcu_read_lock - register a new reader for an SRCU-protected structure.
 * @sp: srcu_struct in which to register the new reader.
 *
 * Enter an SRCU read-side critical section.  Note that SRCU read-side
 * critical sections may be nested.  However, it is illegal to
 * call anything that waits on an SRCU grace period for the same
 * srcu_struct, whether directly or indirectly.  Please note that
 * one way to indirectly wait on an SRCU grace period is to acquire
 * a mutex that is held elsewhere while calling synchronize_srcu() or
 * synchronize_srcu_expedited().
 *
 * Note that srcu_read_lock() and the matching srcu_read_unlock() must
 * occur in the same context, for example, it is illegal to invoke
 * srcu_read_unlock() in an irq handler if the matching srcu_read_lock()
 * was invoked in process context.
 */
static inline int srcu_read_lock(struct srcu_struct *sp) __acquires(sp)
{
	int retval = __srcu_read_lock(sp);

	rcu_lock_acquire(&(sp)->dep_map);
	return retval;
}

/**
 * srcu_read_unlock - unregister an old reader from an SRCU-protected structure.
 * @sp: srcu_struct in which to unregister the old reader.
 * @idx: return value from corresponding srcu_read_lock().
 *
 * Exit an SRCU read-side critical section.
 */
static inline void srcu_read_unlock(struct srcu_struct *sp, int idx)
	__releases(sp)
{
	rcu_lock_release(&(sp)->dep_map);
	__srcu_read_unlock(sp, idx);
}

/**
 * smp_mb__after_srcu_read_unlock - ensure full ordering after srcu_read_unlock
 *
 * Converts the preceding srcu_read_unlock into a two-way memory barrier.
 *
 * Call this after srcu_read_unlock, to guarantee that all memory operations
 * that occur after smp_mb__after_srcu_read_unlock will appear to happen after
 * the preceding srcu_read_unlock.
 */
static inline void smp_mb__after_srcu_read_unlock(void)
{
	/* __srcu_read_unlock has smp_mb() internally so nothing to do here. */
}

#endif
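
/*
 * Reader-side usage sketch (illustrative only, not part of this header).
 * It assumes the hypothetical foo_srcu domain from the update-side sketch
 * above and a hypothetical SRCU-protected pointer foo_ptr:
 *
 *	struct foo __rcu *foo_ptr;
 *
 *	static int foo_read_data(void)
 *	{
 *		struct foo *fp;
 *		int idx, val;
 *
 *		idx = srcu_read_lock(&foo_srcu);
 *		fp = srcu_dereference(foo_ptr, &foo_srcu);
 *		val = fp ? fp->data : -1;
 *		srcu_read_unlock(&foo_srcu, idx);
 *		return val;
 *	}
 */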