/*
 * Read-Copy Update definitions shared among RCU implementations.
 *
 * Copyright IBM Corporation, 2011
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#ifndef __LINUX_RCU_H
#define __LINUX_RCU_H

#include <trace/events/rcu.h>
#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else
#define RCU_TRACE(stmt)
#endif

/* Offset to allow for unmatched rcu_irq_{enter,exit}(). */
#define DYNTICK_IRQ_NONIDLE ((LONG_MAX / 2) + 1)

/*
 * Grace-period counter management.  The low-order RCU_SEQ_CTR_SHIFT bits
 * of the counter record the grace-period state (zero when no grace
 * period is in progress), and the remaining high-order bits count
 * grace periods.
 */

#define RCU_SEQ_CTR_SHIFT 2
#define RCU_SEQ_STATE_MASK ((1 << RCU_SEQ_CTR_SHIFT) - 1)

/*
 * Return the counter portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline unsigned long rcu_seq_ctr(unsigned long s)
{
	return s >> RCU_SEQ_CTR_SHIFT;
}

/*
 * Return the state portion of a sequence number previously returned
 * by rcu_seq_snap() or rcu_seq_current().
 */
static inline int rcu_seq_state(unsigned long s)
{
	return s & RCU_SEQ_STATE_MASK;
}

/*
 * Set the state portion of the pointed-to sequence number.
 * The caller is responsible for preventing conflicting updates.
 */
static inline void rcu_seq_set_state(unsigned long *sp, int newstate)
{
	WARN_ON_ONCE(newstate & ~RCU_SEQ_STATE_MASK);
	WRITE_ONCE(*sp, (*sp & ~RCU_SEQ_STATE_MASK) + newstate);
}

/* Adjust sequence number for start of update-side operation. */
static inline void rcu_seq_start(unsigned long *sp)
{
	WRITE_ONCE(*sp, *sp + 1);
	smp_mb(); /* Ensure update-side operation after counter increment. */
	WARN_ON_ONCE(rcu_seq_state(*sp) != 1);
}

/* Compute the end-of-period value for the specified sequence number. */
static inline unsigned long rcu_seq_endval(unsigned long *sp)
{
	return (*sp | RCU_SEQ_STATE_MASK) + 1;
}

/* Adjust sequence number for end of update-side operation. */
static inline void rcu_seq_end(unsigned long *sp)
{
	smp_mb(); /* Ensure update-side operation before counter increment. */
	WARN_ON_ONCE(!rcu_seq_state(*sp));
	WRITE_ONCE(*sp, rcu_seq_endval(sp));
}

/*
 * Take a snapshot of the update side's sequence number.  The returned
 * value is the earliest sequence number indicating that a full
 * update-side operation has elapsed since the snapshot was taken.
 */
static inline unsigned long rcu_seq_snap(unsigned long *sp)
{
	unsigned long s;

	s = (READ_ONCE(*sp) + 2 * RCU_SEQ_STATE_MASK + 1) & ~RCU_SEQ_STATE_MASK;
	smp_mb(); /* Above access must not bleed into critical section. */
	return s;
}

/* Return the current value of the update side's sequence number, with no ordering. */
static inline unsigned long rcu_seq_current(unsigned long *sp)
{
	return READ_ONCE(*sp);
}

/*
 * Given a snapshot from rcu_seq_snap(), determine whether or not a
 * full update-side operation has occurred since the snapshot was taken.
 */
static inline bool rcu_seq_done(unsigned long *sp, unsigned long s)
{
	return ULONG_CMP_GE(READ_ONCE(*sp), s);
}

/*
 * debug_rcu_head_queue()/debug_rcu_head_unqueue() are used internally
 * by call_rcu() and rcu callback execution, and are therefore not part
 * of the RCU API.  They are shared here because all RCU implementations
 * use them.
 */

#ifdef CONFIG_DEBUG_OBJECTS_RCU_HEAD
# define STATE_RCU_HEAD_READY 0
# define STATE_RCU_HEAD_QUEUED 1

extern struct debug_obj_descr rcuhead_debug_descr;

static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	int r1;

	r1 = debug_object_activate(head, &rcuhead_debug_descr);
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_READY,
				  STATE_RCU_HEAD_QUEUED);
	return r1;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
	debug_object_active_state(head, &rcuhead_debug_descr,
				  STATE_RCU_HEAD_QUEUED,
				  STATE_RCU_HEAD_READY);
	debug_object_deactivate(head, &rcuhead_debug_descr);
}
#else /* !CONFIG_DEBUG_OBJECTS_RCU_HEAD */
static inline int debug_rcu_head_queue(struct rcu_head *head)
{
	return 0;
}

static inline void debug_rcu_head_unqueue(struct rcu_head *head)
{
}
#endif /* #else !CONFIG_DEBUG_OBJECTS_RCU_HEAD */

/* Forward declaration of kfree(), used by the lazy path in __rcu_reclaim(). */
void kfree(const void *);

/*
 * Reclaim the specified callback, either by invoking it (non-lazy case)
 * or freeing it directly (lazy case).  Return true if lazy, false otherwise.
 */
static inline bool __rcu_reclaim(const char *rn, struct rcu_head *head)
{
	unsigned long offset = (unsigned long)head->func;

	rcu_lock_acquire(&rcu_callback_map);
	if (__is_kfree_rcu_offset(offset)) {
		RCU_TRACE(trace_rcu_invoke_kfree_callback(rn, head, offset);)
		kfree((void *)head - offset);
		rcu_lock_release(&rcu_callback_map);
		return true;
	} else {
		RCU_TRACE(trace_rcu_invoke_callback(rn, head);)
		head->func(head);
		rcu_lock_release(&rcu_callback_map);
		return false;
	}
}
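
/*
 * Illustrative only: how the lazy branch above is reached.  kfree_rcu()
 * encodes the offset of the rcu_head within its enclosing structure in
 * head->func; offsets small enough to pass __is_kfree_rcu_offset() cannot
 * be valid function pointers, which is how the two cases are told apart.
 * "struct foo" and "p" below are hypothetical:
 *
 *	struct foo {
 *		int data;
 *		struct rcu_head rh;
 *	};
 *
 *	kfree_rcu(p, rh);  // queues &p->rh, func == offsetof(struct foo, rh)
 *
 * __rcu_reclaim() then recovers p as (void *)&p->rh - offset and kfree()s it.
 */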

#ifdef CONFIG_RCU_STALL_COMMON

extern int rcu_cpu_stall_suppress;
int rcu_jiffies_till_stall_check(void);

/*
 * Suppress RCU CPU stall warnings while dumping the ftrace buffer.  The
 * value 3 marks a dump-time suppression so that only such a suppression
 * is undone afterwards.
 */
#define rcu_ftrace_dump_stall_suppress() \
do { \
	if (!rcu_cpu_stall_suppress) \
		rcu_cpu_stall_suppress = 3; \
} while (0)

#define rcu_ftrace_dump_stall_unsuppress() \
do { \
	if (rcu_cpu_stall_suppress == 3) \
		rcu_cpu_stall_suppress = 0; \
} while (0)

#else /* #ifdef CONFIG_RCU_STALL_COMMON */
#define rcu_ftrace_dump_stall_suppress()
#define rcu_ftrace_dump_stall_unsuppress()
#endif /* #else #ifdef CONFIG_RCU_STALL_COMMON */

/*
 * Strings used in tracepoints need to be exported via the
 * tracing system such that tools like perf and trace-cmd can
 * translate the string address pointers to actual text.
 */
#define TPS(x) tracepoint_string(x)

/*
 * Dump the ftrace buffer, but only one time per callsite per boot.
 */
#define rcu_ftrace_dump(oops_dump_mode) \
do { \
	static atomic_t ___rfd_beenhere = ATOMIC_INIT(0); \
	\
	if (!atomic_read(&___rfd_beenhere) && \
	    !atomic_xchg(&___rfd_beenhere, 1)) { \
		tracing_off(); \
		rcu_ftrace_dump_stall_suppress(); \
		ftrace_dump(oops_dump_mode); \
		rcu_ftrace_dump_stall_unsuppress(); \
	} \
} while (0)
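
/*
 * Illustrative only: a hypothetical call site.  DUMP_ALL is the
 * enum ftrace_dump_mode value requesting every CPU's buffer:
 *
 *	if (something_looks_wrong)
 *		rcu_ftrace_dump(DUMP_ALL);
 */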

void rcu_early_boot_tests(void);
void rcu_test_sync_prims(void);

/*
 * This function really isn't for public consumption, but RCU is special in
 * that context switches can allow the state machine to make progress.
 */
extern void resched_cpu(int cpu);

#if defined(SRCU) || !defined(TINY_RCU)

#include <linux/rcu_node_tree.h>

extern int rcu_num_lvls;
extern int num_rcu_lvl[];
extern int rcu_num_nodes;
static bool rcu_fanout_exact;
static int rcu_fanout_leaf;

/*
 * Compute the per-level fanout, either using the exact fanout specified
 * or balancing the tree, depending on the rcu_fanout_exact boot parameter.
 */
static inline void rcu_init_levelspread(int *levelspread, const int *levelcnt)
{
	int i;

	if (rcu_fanout_exact) {
		levelspread[rcu_num_lvls - 1] = rcu_fanout_leaf;
		for (i = rcu_num_lvls - 2; i >= 0; i--)
			levelspread[i] = RCU_FANOUT;
	} else {
		int ccur;
		int cprv;

		cprv = nr_cpu_ids;
		for (i = rcu_num_lvls - 1; i >= 0; i--) {
			ccur = levelcnt[i];
			levelspread[i] = (cprv + ccur - 1) / ccur;
			cprv = ccur;
		}
	}
}
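
/*
 * Worked example (illustrative numbers only): with nr_cpu_ids == 96 and
 * a two-level tree with levelcnt == { 1, 6 }, the balancing branch yields
 * levelspread[1] = (96 + 6 - 1) / 6 = 16 CPUs per leaf rcu_node, and then
 * levelspread[0] = (6 + 1 - 1) / 1 = 6 leaves under the root.
 */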

/* Returns a pointer to the first leaf rcu_node structure. */
#define rcu_first_leaf_node(rsp) ((rsp)->level[rcu_num_lvls - 1])

/* Is this rcu_node structure a leaf? */
#define rcu_is_leaf_node(rnp) ((rnp)->level == rcu_num_lvls - 1)

/*
 * Do a full breadth-first scan of the rcu_node structures for the
 * specified rcu_state structure.
 */
#define rcu_for_each_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Do a breadth-first scan of the non-leaf rcu_node structures for the
 * specified rcu_state structure.  Note that if there is a singleton
 * rcu_node tree with but one rcu_node structure, this loop is a no-op.
 */
#define rcu_for_each_nonleaf_node_breadth_first(rsp, rnp) \
	for ((rnp) = &(rsp)->node[0]; !rcu_is_leaf_node(rnp); (rnp)++)

/*
 * Scan the leaves of the rcu_node hierarchy for the specified rcu_state
 * structure.  Note that if there is a singleton rcu_node tree with but
 * one rcu_node structure, this loop -will- visit the rcu_node structure.
 * It is still a leaf node, even if it is also the root node.
 */
#define rcu_for_each_leaf_node(rsp, rnp) \
	for ((rnp) = rcu_first_leaf_node(rsp); \
	     (rnp) < &(rsp)->node[rcu_num_nodes]; (rnp)++)

/*
 * Iterate over all possible CPUs in a leaf RCU node.
 */
#define for_each_leaf_node_possible_cpu(rnp, cpu) \
	for ((cpu) = cpumask_next((rnp)->grplo - 1, cpu_possible_mask); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = cpumask_next((cpu), cpu_possible_mask))

/*
 * Iterate over all CPUs in a leaf RCU node's specified mask.
 */
#define rcu_find_next_bit(rnp, cpu, mask) \
	((rnp)->grplo + find_next_bit(&(mask), BITS_PER_LONG, (cpu)))
#define for_each_leaf_node_cpu_mask(rnp, cpu, mask) \
	for ((cpu) = rcu_find_next_bit((rnp), 0, (mask)); \
	     (cpu) <= rnp->grphi; \
	     (cpu) = rcu_find_next_bit((rnp), (cpu) + 1 - (rnp->grplo), (mask)))
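
/*
 * Illustrative only: a hypothetical scan touching every possible CPU in
 * every leaf rcu_node structure of the flavor "rsp"; do_something_cpu()
 * is a placeholder, not a kernel function:
 *
 *	struct rcu_node *rnp;
 *	int cpu;
 *
 *	rcu_for_each_leaf_node(rsp, rnp)
 *		for_each_leaf_node_possible_cpu(rnp, cpu)
 *			do_something_cpu(cpu);
 */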

/*
 * Wrappers for the rcu_node structure's ->lock.
 *
 * Because the rcu_node structures form a tree, tree-traversal locking can
 * observe different lock values: an UNLOCK at one level followed by a LOCK
 * at another level does not imply a full memory barrier, and transitivity
 * is lost.  To restore full ordering between tree levels, the regular lock
 * acquisitions below are augmented with smp_mb__after_unlock_lock().
 *
 * Because ->lock is a __private field, use these wrappers rather than
 * referencing it directly.
 */

#define raw_spin_lock_rcu_node(p) \
do { \
	raw_spin_lock(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_rcu_node(p) raw_spin_unlock(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irq_rcu_node(p) \
do { \
	raw_spin_lock_irq(&ACCESS_PRIVATE(p, lock)); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irq_rcu_node(p) \
	raw_spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define raw_spin_lock_irqsave_rcu_node(p, flags) \
do { \
	raw_spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
	smp_mb__after_unlock_lock(); \
} while (0)

#define raw_spin_unlock_irqrestore_rcu_node(p, flags) \
	raw_spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

#define raw_spin_trylock_rcu_node(p) \
({ \
	bool ___locked = raw_spin_trylock(&ACCESS_PRIVATE(p, lock)); \
	\
	if (___locked) \
		smp_mb__after_unlock_lock(); \
	___locked; \
})

#define raw_lockdep_assert_held_rcu_node(p) \
	lockdep_assert_held(&ACCESS_PRIVATE(p, lock))
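
/*
 * Illustrative only: callers are expected to use the wrappers above for
 * all rcu_node ->lock accesses, for example (hypothetical update):
 *
 *	unsigned long flags;
 *
 *	raw_spin_lock_irqsave_rcu_node(rnp, flags);
 *	raw_lockdep_assert_held_rcu_node(rnp);
 *	// ... update fields of *rnp under the lock ...
 *	raw_spin_unlock_irqrestore_rcu_node(rnp, flags);
 */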

#endif /* #if defined(SRCU) || !defined(TINY_RCU) */

#ifdef CONFIG_TINY_RCU

static inline bool rcu_gp_is_normal(void) { return true; }
static inline bool rcu_gp_is_expedited(void) { return false; }
static inline void rcu_expedite_gp(void) { }
static inline void rcu_unexpedite_gp(void) { }
static inline void rcu_request_urgent_qs_task(struct task_struct *t) { }
#else /* #ifdef CONFIG_TINY_RCU */
bool rcu_gp_is_normal(void);
bool rcu_gp_is_expedited(void);
void rcu_expedite_gp(void);
void rcu_unexpedite_gp(void);
void rcupdate_announce_bootup_oddness(void);
void rcu_request_urgent_qs_task(struct task_struct *t);
#endif /* #else #ifdef CONFIG_TINY_RCU */

#define RCU_SCHEDULER_INACTIVE 0
#define RCU_SCHEDULER_INIT 1
#define RCU_SCHEDULER_RUNNING 2

enum rcutorture_type {
	RCU_FLAVOR,
	RCU_BH_FLAVOR,
	RCU_SCHED_FLAVOR,
	RCU_TASKS_FLAVOR,
	SRCU_FLAVOR,
	INVALID_RCU_FLAVOR
};

#if defined(CONFIG_TREE_RCU) || defined(CONFIG_PREEMPT_RCU)
void rcutorture_get_gp_data(enum rcutorture_type test_type, int *flags,
			    unsigned long *gpnum, unsigned long *completed);
void rcutorture_record_test_transition(void);
void rcutorture_record_progress(unsigned long vernum);
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
					  int *flags,
					  unsigned long *gpnum,
					  unsigned long *completed)
{
	*flags = 0;
	*gpnum = 0;
	*completed = 0;
}
static inline void rcutorture_record_test_transition(void) { }
static inline void rcutorture_record_progress(unsigned long vernum) { }
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
			       struct rcu_head *rhp,
			       unsigned long secs,
			       unsigned long c_old,
			       unsigned long c);
#else
#define do_trace_rcu_torture_read(rcutorturename, rhp, secs, c_old, c) \
	do { } while (0)
#endif
#endif

#ifdef CONFIG_TINY_SRCU

static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
					   struct srcu_struct *sp, int *flags,
					   unsigned long *gpnum,
					   unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = sp->srcu_idx;
	*gpnum = *completed;
}

#elif defined(CONFIG_TREE_SRCU)

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed);

#endif

#ifdef CONFIG_TINY_RCU
static inline unsigned long rcu_batches_started(void) { return 0; }
static inline unsigned long rcu_batches_started_bh(void) { return 0; }
static inline unsigned long rcu_batches_started_sched(void) { return 0; }
static inline unsigned long rcu_batches_completed(void) { return 0; }
static inline unsigned long rcu_batches_completed_bh(void) { return 0; }
static inline unsigned long rcu_batches_completed_sched(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed(void) { return 0; }
static inline unsigned long rcu_exp_batches_completed_sched(void) { return 0; }
static inline unsigned long
srcu_batches_completed(struct srcu_struct *sp) { return 0; }
static inline void rcu_force_quiescent_state(void) { }
static inline void rcu_bh_force_quiescent_state(void) { }
static inline void rcu_sched_force_quiescent_state(void) { }
static inline void show_rcu_gp_kthreads(void) { }
#else /* #ifdef CONFIG_TINY_RCU */
extern unsigned long rcutorture_testseq;
extern unsigned long rcutorture_vernum;
unsigned long rcu_batches_started(void);
unsigned long rcu_batches_started_bh(void);
unsigned long rcu_batches_started_sched(void);
unsigned long rcu_batches_completed(void);
unsigned long rcu_batches_completed_bh(void);
unsigned long rcu_batches_completed_sched(void);
unsigned long rcu_exp_batches_completed(void);
unsigned long rcu_exp_batches_completed_sched(void);
unsigned long srcu_batches_completed(struct srcu_struct *sp);
void show_rcu_gp_kthreads(void);
void rcu_force_quiescent_state(void);
void rcu_bh_force_quiescent_state(void);
void rcu_sched_force_quiescent_state(void);
extern struct workqueue_struct *rcu_gp_wq;
extern struct workqueue_struct *rcu_par_gp_wq;
#endif /* #else #ifdef CONFIG_TINY_RCU */

#ifdef CONFIG_RCU_NOCB_CPU
bool rcu_is_nocb_cpu(int cpu);
#else
static inline bool rcu_is_nocb_cpu(int cpu) { return false; }
#endif

#endif /* __LINUX_RCU_H */