// SPDX-License-Identifier: GPL-2.0+
/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tree (scalable) variant.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2017
 *
 * Authors: Paul McKenney <paulmck@linux.ibm.com>
 *	   Lai Jiangshan <jiangshanlai@gmail.com>
 *
 * For detailed explanation of the Read-Copy Update mechanism see
 * Documentation/RCU/.
 */

#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly once per 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/*
 * Control conversion to SRCU_SIZE_BIG:
 *
 *    0: Don't convert at all.
 *    1: Convert at init_srcu_struct() time.
 *    2: Convert when rcutorture invokes srcu_torture_stats_print().
 *    3: Decide at boot time (default).
 * 0x1X: Above plus if high contention.
 */
#define SRCU_SIZING_NONE	0
#define SRCU_SIZING_INIT	1
#define SRCU_SIZING_TORTURE	2
#define SRCU_SIZING_AUTO	3
#define SRCU_SIZING_CONTEND	0x10
#define SRCU_SIZING_IS(x) ((convert_to_big & ~SRCU_SIZING_CONTEND) == x)
#define SRCU_SIZING_IS_NONE() (SRCU_SIZING_IS(SRCU_SIZING_NONE))
#define SRCU_SIZING_IS_INIT() (SRCU_SIZING_IS(SRCU_SIZING_INIT))
#define SRCU_SIZING_IS_TORTURE() (SRCU_SIZING_IS(SRCU_SIZING_TORTURE))
#define SRCU_SIZING_IS_CONTEND() (convert_to_big & SRCU_SIZING_CONTEND)
static int convert_to_big = SRCU_SIZING_AUTO;
module_param(convert_to_big, int, 0444);
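
/*
 * Illustrative note (a sketch, not authoritative documentation): because
 * this file builds into srcutree.o, the module_param() declarations above
 * are settable from the kernel boot command line using the "srcutree."
 * prefix, for example:
 *
 *	srcutree.convert_to_big=1	// Convert at init_srcu_struct() time.
 *	srcutree.convert_to_big=0x10	// Convert only on high contention.
 *
 * Consult Documentation/admin-guide/kernel-parameters.txt for the
 * authoritative list of parameters and values.
 */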

/* Number of CPUs to trigger init_srcu_struct()-time transition to big. */
static int big_cpu_lim __read_mostly = 128;
module_param(big_cpu_lim, int, 0444);

/* Contention events per jiffy to initiate transition to big. */
static int small_contention_lim __read_mostly = 100;
module_param(small_contention_lim, int, 0444);

/* Early-boot callback-management, so early that no lock is required! */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/* Wrappers for lock acquisition and release, see raw_spin_lock_rcu_node(). */
#define spin_lock_rcu_node(p)							\
do {										\
	spin_lock(&ACCESS_PRIVATE(p, lock));					\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)						\
do {										\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_unlock_irq_rcu_node(p)						\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)					\
do {										\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);			\
	smp_mb__after_unlock_lock();						\
} while (0)

#define spin_trylock_irqsave_rcu_node(p, flags)					\
({										\
	bool ___locked = spin_trylock_irqsave(&ACCESS_PRIVATE(p, lock), flags); \
										\
	if (___locked)								\
		smp_mb__after_unlock_lock();					\
	___locked;								\
})

#define spin_unlock_irqrestore_rcu_node(p, flags)				\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)

/*
 * Initialize SRCU per-CPU data.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them.
 */
static void init_srcu_struct_data(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = NULL;
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
	}
}

/* Invalid seq state, used during snp node initialization */
#define SRCU_SNP_INIT_SEQ		0x2

/*
 * Check whether sequence number corresponding to snp node,
 * is invalid.
 */
static inline bool srcu_invl_snp_seq(unsigned long s)
{
	return rcu_seq_state(s) == SRCU_SNP_INIT_SEQ;
}

/*
 * Allocate and initialize the SRCU combining tree.  Returns @true if
 * allocation succeeded and @false otherwise.
 */
static bool init_srcu_struct_nodes(struct srcu_struct *ssp, gfp_t gfp_flags)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Initialize geometry if it has not already been initialized. */
	rcu_init_geometry();
	ssp->node = kcalloc(rcu_num_nodes, sizeof(*ssp->node), gfp_flags);
	if (!ssp->node)
		return false;

	/* Work out the overall tree geometry. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = SRCU_SNP_INIT_SEQ;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = SRCU_SNP_INIT_SEQ;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
	}
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_WAIT_BARRIER);
	return true;
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static parameter
 * tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
	ssp->node = NULL;
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	ssp->sda_is_static = is_static;
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	if (!ssp->sda)
		return -ENOMEM;
	init_srcu_struct_data(ssp);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	if (READ_ONCE(ssp->srcu_size_state) == SRCU_SIZE_SMALL && SRCU_SIZING_IS_INIT()) {
		if (!init_srcu_struct_nodes(ssp, GFP_ATOMIC)) {
			if (!ssp->sda_is_static) {
				free_percpu(ssp->sda);
				ssp->sda = NULL;
				return -ENOMEM;
			}
		} else {
			WRITE_ONCE(ssp->srcu_size_state, SRCU_SIZE_BIG);
		}
	}
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return 0;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 *
 * Return: 0 on success, or -ENOMEM on allocation failure.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
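
/*
 * Illustrative sketch (not used in this file, hypothetical "my_srcu"
 * name): a dynamically initialized srcu_struct is typically set up and
 * torn down as follows:
 *
 *	static struct srcu_struct my_srcu;
 *
 *	ret = init_srcu_struct(&my_srcu);
 *	if (ret)
 *		return ret;	// -ENOMEM, most likely.
 *	...
 *	cleanup_srcu_struct(&my_srcu);	// After readers and callbacks drain.
 *
 * Statically allocated instances can instead use DEFINE_SRCU() or
 * DEFINE_STATIC_SRCU(), which avoid the runtime initialization step.
 */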

/*
 * Initiate a transition to SRCU_SIZE_BIG with lock held.
 */
static void __srcu_transition_to_big(struct srcu_struct *ssp)
{
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	smp_store_release(&ssp->srcu_size_state, SRCU_SIZE_ALLOC);
}

/*
 * Initiate an idempotent transition to SRCU_SIZE_BIG.
 */
static void srcu_transition_to_big(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* Double-checked locking on ->srcu_size_state. */
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL)
		return;
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (smp_load_acquire(&ssp->srcu_size_state) != SRCU_SIZE_SMALL) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	__srcu_transition_to_big(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Check to see if the just-encountered contention event justifies
 * a transition to SRCU_SIZE_BIG.
 */
static void spin_lock_irqsave_check_contention(struct srcu_struct *ssp)
{
	unsigned long j;

	if (!SRCU_SIZING_IS_CONTEND() || ssp->srcu_size_state)
		return;
	j = jiffies;
	if (ssp->srcu_size_jiffies != j) {
		ssp->srcu_size_jiffies = j;
		ssp->srcu_n_lock_retries = 0;
	}
	if (++ssp->srcu_n_lock_retries <= small_contention_lim)
		return;
	__srcu_transition_to_big(ssp);
}

/*
 * Acquire the specified srcu_data structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_sdp_contention(struct srcu_data *sdp, unsigned long *flags)
{
	struct srcu_struct *ssp = sdp->ssp;

	if (spin_trylock_irqsave_rcu_node(sdp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
	spin_unlock_irqrestore_rcu_node(ssp, *flags);
	spin_lock_irqsave_rcu_node(sdp, *flags);
}

/*
 * Acquire the specified srcu_struct structure's ->lock, but check for
 * excessive contention, which results in initiation of a transition
 * to SRCU_SIZE_BIG.  But only if the srcutree.convert_to_big module
 * parameter permits this.
 */
static void spin_lock_irqsave_ssp_contention(struct srcu_struct *ssp, unsigned long *flags)
{
	if (spin_trylock_irqsave_rcu_node(ssp, *flags))
		return;
	spin_lock_irqsave_rcu_node(ssp, *flags);
	spin_lock_irqsave_check_contention(ssp);
}

/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the prior smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed))) /*^^^*/
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store-buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some point in this function.
	 * But there might be more readers, as a task might have read the
	 * current ->srcu_idx but not yet have incremented its CPU's
	 * ->srcu_lock_count[idx] counter.  However, because such a reader
	 * is guaranteed to see the new ->srcu_idx value on its next
	 * __srcu_read_lock(), it can increment the old index's counters
	 * only a strictly bounded number of additional times, which cannot
	 * overflow these counters given current (and foreseeable) numbers
	 * of CPUs and tasks, especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers. and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below, boot time configurable) to allow SRCU readers to exit
 * their read-side critical sections.  If there are still some readers
 * after one jiffy, we repeatedly block for one jiffy time periods.
 * The blocking time is increased as the grace-period age increases,
 * with max blocking time capped at 10 jiffies.
 */
#define SRCU_DEFAULT_RETRY_CHECK_DELAY		5

static ulong srcu_retry_check_delay = SRCU_DEFAULT_RETRY_CHECK_DELAY;
module_param(srcu_retry_check_delay, ulong, 0444);

#define SRCU_INTERVAL		1		// Base delay if no expedited GPs pending.
#define SRCU_MAX_INTERVAL	10		// Maximum incremental delay from slow readers.

#define SRCU_DEFAULT_MAX_NODELAY_PHASE_LO	3UL	// Lowmark on default per-GP-phase
							// no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_HI	1000UL	// Highmark on default per-GP-phase
							// no-delay instances.

#define SRCU_UL_CLAMP_LO(val, low)	((val) > (low) ? (val) : (low))
#define SRCU_UL_CLAMP_HI(val, high)	((val) < (high) ? (val) : (high))
#define SRCU_UL_CLAMP(val, low, high)	SRCU_UL_CLAMP_HI(SRCU_UL_CLAMP_LO((val), (low)), (high))

// Default per-GP-phase no-delay instances, adjusted so that non-sleeping
// polling is limited to roughly one jiffy.  The factor of two accounts for
// the srcu_get_delay() call from process_srcu().
#define SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED	\
	(2UL * USEC_PER_SEC / HZ / SRCU_DEFAULT_RETRY_CHECK_DELAY)

// Maximum per-GP-phase consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY_PHASE	\
	SRCU_UL_CLAMP(SRCU_DEFAULT_MAX_NODELAY_PHASE_ADJUSTED,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_LO,	\
		      SRCU_DEFAULT_MAX_NODELAY_PHASE_HI)

static ulong srcu_max_nodelay_phase = SRCU_DEFAULT_MAX_NODELAY_PHASE;
module_param(srcu_max_nodelay_phase, ulong, 0444);

// Maximum consecutive no-delay instances.
#define SRCU_DEFAULT_MAX_NODELAY	(SRCU_DEFAULT_MAX_NODELAY_PHASE > 100 ?	\
					 SRCU_DEFAULT_MAX_NODELAY_PHASE : 100)

static ulong srcu_max_nodelay = SRCU_DEFAULT_MAX_NODELAY;
module_param(srcu_max_nodelay, ulong, 0444);

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	unsigned long gpstart;
	unsigned long j;
	unsigned long jbase = SRCU_INTERVAL;

	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		jbase = 0;
	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq))) {
		j = jiffies - 1;
		gpstart = READ_ONCE(ssp->srcu_gp_start);
		if (time_after(j, gpstart))
			jbase += j - gpstart;
		if (!jbase) {
			WRITE_ONCE(ssp->srcu_n_exp_nodelay, READ_ONCE(ssp->srcu_n_exp_nodelay) + 1);
			if (READ_ONCE(ssp->srcu_n_exp_nodelay) > srcu_max_nodelay_phase)
				jbase = 1;
		}
	}
	return jbase > SRCU_MAX_INTERVAL ? SRCU_MAX_INTERVAL : jbase;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(rcu_seq_current(&ssp->srcu_gp_seq) != ssp->srcu_gp_seq_needed) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p read state: %d gp state: %lu/%lu\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)),
			rcu_seq_current(&ssp->srcu_gp_seq), ssp->srcu_gp_seq_needed);
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	if (!ssp->sda_is_static) {
		free_percpu(ssp->sda);
		ssp->sda = NULL;
	}
	kfree(ssp->node);
	ssp->node = NULL;
	ssp->srcu_size_state = SRCU_SIZE_SMALL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
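
/*
 * Illustrative reader sketch (hypothetical "my_srcu" srcu_struct and "gp"
 * pointer): callers normally use the srcu_read_lock()/srcu_read_unlock()
 * wrappers, which add lockdep annotations around the __srcu_read_lock()
 * and __srcu_read_unlock() primitives above:
 *
 *	int idx;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	do_something_with(p);		// May block, unlike plain RCU.
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * The index returned by srcu_read_lock() must be passed to the matching
 * srcu_read_unlock(), as the grace-period machinery may flip ->srcu_idx
 * while the critical section is running.
 */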

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp;
	int state;

	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = this_cpu_ptr(ssp->sda);
	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	WRITE_ONCE(ssp->srcu_gp_start, jiffies);
	WRITE_ONCE(ssp->srcu_n_exp_nodelay, 0);
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}


static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay = 1;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	unsigned long sgsne;
	struct srcu_node *snp;
	int ss_state;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq), READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		cbdelay = 0;

	WRITE_ONCE(ssp->srcu_last_gp_end, ktime_get_mono_fast_ns());
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, gpseq);
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_BARRIER) {
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, 0), cbdelay);
	} else {
		idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
		srcu_for_each_node_breadth_first(ssp, snp) {
			spin_lock_irq_rcu_node(snp);
			cbs = false;
			last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
			if (last_lvl)
				cbs = ss_state < SRCU_SIZE_BIG || snp->srcu_have_cbs[idx] == gpseq;
			snp->srcu_have_cbs[idx] = gpseq;
			rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, gpseq))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, gpseq);
			if (ss_state < SRCU_SIZE_BIG)
				mask = ~0;
			else
				mask = snp->srcu_data_have_cbs[idx];
			snp->srcu_data_have_cbs[idx] = 0;
			spin_unlock_irq_rcu_node(snp);
			if (cbs)
				srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);
		}
	}

	/* Occasionally prevent srcu_data counter wrap. */
	if (!(gpseq & counter_wrap_check))
		for_each_possible_cpu(cpu) {
			sdp = per_cpu_ptr(ssp->sda, cpu);
			spin_lock_irqsave_rcu_node(sdp, flags);
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed + 100))
				sdp->srcu_gp_seq_needed = gpseq;
			if (ULONG_CMP_GE(gpseq, sdp->srcu_gp_seq_needed_exp + 100))
				sdp->srcu_gp_seq_needed_exp = gpseq;
			spin_unlock_irqrestore_rcu_node(sdp, flags);
		}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}

	/* Transition to big if needed. */
	if (ss_state != SRCU_SIZE_SMALL && ss_state != SRCU_SIZE_BIG) {
		if (ss_state == SRCU_SIZE_ALLOC)
			init_srcu_struct_nodes(ssp, GFP_KERNEL);
		else
			smp_store_release(&ssp->srcu_size_state, ss_state + 1);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;
	unsigned long sgsne;

	if (snp)
		for (; snp != NULL; snp = snp->srcu_parent) {
			sgsne = READ_ONCE(snp->srcu_gp_seq_needed_exp);
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
			    (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)))
				return;
			spin_lock_irqsave_rcu_node(snp, flags);
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!srcu_invl_snp_seq(sgsne) && ULONG_CMP_GE(sgsne, s)) {
				spin_unlock_irqrestore_rcu_node(snp, flags);
				return;
			}
			WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	unsigned long sgsne;
	struct srcu_node *snp;
	struct srcu_node *snp_leaf;
	unsigned long snp_seq;

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		snp_leaf = NULL;
	else
		snp_leaf = sdp->mynode;

	if (snp_leaf)
		/* Each pass through the loop does one level of the srcu_node tree. */
		for (snp = snp_leaf; snp != NULL; snp = snp->srcu_parent) {
			if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != snp_leaf)
				return; /* GP already done and CBs recorded. */
			spin_lock_irqsave_rcu_node(snp, flags);
			snp_seq = snp->srcu_have_cbs[idx];
			if (!srcu_invl_snp_seq(snp_seq) && ULONG_CMP_GE(snp_seq, s)) {
				if (snp == snp_leaf && snp_seq == s)
					snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
				spin_unlock_irqrestore_rcu_node(snp, flags);
				if (snp == snp_leaf && snp_seq != s) {
					srcu_schedule_cbs_sdp(sdp, do_norm ? SRCU_INTERVAL : 0);
					return;
				}
				if (!do_norm)
					srcu_funnel_exp_start(ssp, snp, s);
				return;
			}
			snp->srcu_have_cbs[idx] = s;
			if (snp == snp_leaf)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			sgsne = snp->srcu_gp_seq_needed_exp;
			if (!do_norm && (srcu_invl_snp_seq(sgsne) || ULONG_CMP_LT(sgsne, s)))
				WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
			spin_unlock_irqrestore_rcu_node(snp, flags);
		}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_ssp_contention(ssp, &flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s); /*^^^*/
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		WRITE_ONCE(ssp->srcu_gp_seq_needed_exp, s);

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);

		// If srcu_init() has not yet run, workqueues are not yet
		// available, so defer grace-period work by adding this
		// srcu_struct to srcu_boot_list.  The list_add() need not
		// be safe for concurrent execution: during early boot only
		// one CPU is running, with interrupts disabled here.
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   !!srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	unsigned long curdelay;

	curdelay = !srcu_get_delay(ssp);

	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if ((--trycount + curdelay) <= 0)
			return false;
		udelay(srcu_retry_check_delay);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is fine, because
	 * this grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this type of error.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period, and the extra latency
 * of a needlessly non-expedited grace period is similarly negligible.
 */
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;
	unsigned long tlast;

	check_init_srcu_struct(ssp);
	/* If the local srcu_data structure has callbacks, not idle.  */
	sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_rcu_node(sdp, flags);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irqrestore_rcu_node(sdp, flags);
		return false; /* Callbacks already present, so not idle. */
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	tlast = READ_ONCE(ssp->srcu_last_gp_end);
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, tlast, tlast + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Start an SRCU grace period, and also queue the callback if non-NULL.
 */
static unsigned long srcu_gp_start_if_needed(struct srcu_struct *ssp,
					     struct rcu_head *rhp, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;
	struct srcu_node *sdp_mynode;
	int ss_state;

	check_init_srcu_struct(ssp);
	idx = srcu_read_lock(ssp);
	ss_state = smp_load_acquire(&ssp->srcu_size_state);
	if (ss_state < SRCU_SIZE_WAIT_CALL)
		sdp = per_cpu_ptr(ssp->sda, 0);
	else
		sdp = raw_cpu_ptr(ssp->sda);
	spin_lock_irqsave_sdp_contention(sdp, &flags);
	if (rhp)
		rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);

	/* Ensure that snp node tree is fully initialized before traversing it */
	if (ss_state < SRCU_SIZE_WAIT_BARRIER)
		sdp_mynode = NULL;
	else
		sdp_mynode = sdp->mynode;

	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp_mynode, s);
	srcu_read_unlock(ssp, idx);
	return s;
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	(void)srcu_gp_start_if_needed(ssp, rhp, do_norm);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct in queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
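
/*
 * Illustrative sketch of call_srcu() use, assuming a hypothetical
 * "struct foo" containing an rcu_head field named "rh" and a hypothetical
 * srcu_struct named "my_srcu" (neither is defined in this file):
 *
 *	static void foo_free_cb(struct rcu_head *rhp)
 *	{
 *		struct foo *fp = container_of(rhp, struct foo, rh);
 *
 *		kfree(fp);	// Runs after all pre-existing readers finish.
 *	}
 *
 *	// Updater, after unpublishing fp from all reader-visible paths:
 *	call_srcu(&my_srcu, &fp->rh, foo_free_cb);
 */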

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lockdep_is_held(ssp) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flips the srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
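
/*
 * Illustrative updater sketch (hypothetical "my_srcu", "my_lock", and "gp"
 * names, none defined here): the classic remove-wait-reclaim pattern built
 * on synchronize_srcu():
 *
 *	old = rcu_dereference_protected(gp, lockdep_is_held(&my_lock));
 *	rcu_assign_pointer(gp, new);	// Publish the replacement.
 *	synchronize_srcu(&my_srcu);	// Wait out pre-existing readers.
 *	kfree(old);			// Now safe to reclaim.
 */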

/**
 * get_state_synchronize_srcu - Provide an end-of-grace-period cookie
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  It is the caller's responsibility
 * to make sure that grace period happens, for example, by invoking
 * call_srcu() after return from get_state_synchronize_srcu().
 */
unsigned long get_state_synchronize_srcu(struct srcu_struct *ssp)
{
	// Any prior manipulation of SRCU-protected data must happen
	// before the load from ->srcu_gp_seq.
	smp_mb();
	return rcu_seq_snap(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(get_state_synchronize_srcu);

/**
 * start_poll_synchronize_srcu - Provide cookie and start grace period
 * @ssp: srcu_struct to provide cookie for.
 *
 * This function returns a cookie that can be passed to
 * poll_state_synchronize_srcu(), which will return true if a full grace
 * period has elapsed in the meantime.  Unlike get_state_synchronize_srcu(),
 * this function also ensures that any needed SRCU grace period will be
 * started.  This convenience does come at a cost in terms of locking.
 */
unsigned long start_poll_synchronize_srcu(struct srcu_struct *ssp)
{
	return srcu_gp_start_if_needed(ssp, NULL, true);
}
EXPORT_SYMBOL_GPL(start_poll_synchronize_srcu);

/**
 * poll_state_synchronize_srcu - Has cookie's grace period ended?
 * @ssp: srcu_struct to provide cookie for.
 * @cookie: Return value from get_state_synchronize_srcu() or start_poll_synchronize_srcu().
 *
 * This function takes the cookie that was returned from either
 * get_state_synchronize_srcu() or start_poll_synchronize_srcu(), and
 * returns @true if an SRCU grace period elapsed since the time that the
 * cookie was created.
 *
 * Because cookies are finite in size, wrapping/overflow is possible.
 * This is more pronounced on 32-bit systems where cookies are 32 bits;
 * on 64-bit systems, wrapping instead requires geologic timespans.
 */
bool poll_state_synchronize_srcu(struct srcu_struct *ssp, unsigned long cookie)
{
	if (!rcu_seq_done(&ssp->srcu_gp_seq, cookie))
		return false;
	// Ensure that the end of the SRCU grace period happens before
	// any subsequent code that the caller might execute.
	smp_mb(); // ^^^
	return true;
}
EXPORT_SYMBOL_GPL(poll_state_synchronize_srcu);
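
/*
 * Illustrative polling sketch (hypothetical "my_srcu" and "old" names):
 * combining start_poll_synchronize_srcu() and poll_state_synchronize_srcu()
 * lets a caller overlap an SRCU grace period with other work instead of
 * blocking in synchronize_srcu():
 *
 *	unsigned long cookie;
 *
 *	cookie = start_poll_synchronize_srcu(&my_srcu);
 *	do_other_work();
 *	while (!poll_state_synchronize_srcu(&my_srcu, cookie))
 *		schedule_timeout_uninterruptible(1);
 *	kfree(old);	// Grace period has fully elapsed.
 */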

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}

/*
 * Enqueue an srcu_barrier() callback on the specified srcu_data
 * structure's ->cblist.  but only if that ->cblist already has at least one
 * callback enqueued.  Note that if a CPU already has callbacks enqueued,
 * it must have already registered the need for a future grace period,
 * so all we need do is enqueue a callback that will use the same grace
 * period as the last callback already in the queue.
 */
static void srcu_barrier_one_cpu(struct srcu_struct *ssp, struct srcu_data *sdp)
{
	spin_lock_irq_rcu_node(sdp);
	atomic_inc(&ssp->srcu_barrier_cpu_cnt);
	sdp->srcu_barrier_head.func = srcu_barrier_cb;
	debug_rcu_head_queue(&sdp->srcu_barrier_head);
	if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
				   &sdp->srcu_barrier_head)) {
		debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
		atomic_dec(&ssp->srcu_barrier_cpu_cnt);
	}
	spin_unlock_irq_rcu_node(sdp);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	int idx;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	idx = srcu_read_lock(ssp);
	if (smp_load_acquire(&ssp->srcu_size_state) < SRCU_SIZE_WAIT_BARRIER)
		srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, 0));
	else
		for_each_possible_cpu(cpu)
			srcu_barrier_one_cpu(ssp, per_cpu_ptr(ssp->sda, cpu));
	srcu_read_unlock(ssp, idx);

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
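
/*
 * Illustrative sketch (hypothetical "my_srcu" and "my_exit" names): a
 * teardown path that used call_srcu() must drain in-flight callbacks
 * before its srcu_struct can be cleaned up or its callback functions
 * unloaded:
 *
 *	static void my_exit(void)
 *	{
 *		// Ensure no new call_srcu() invocations from here on.
 *		srcu_barrier(&my_srcu);		// Wait for in-flight callbacks.
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 */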

/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return READ_ONCE(ssp->srcu_idx);
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan2
 * completes the grace period.
 */
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq)); /* ^^^ */
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		spin_lock_irq_rcu_node(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
		ssp->srcu_n_exp_nodelay = 0;
		spin_unlock_irq_rcu_node(ssp);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		ssp->srcu_n_exp_nodelay = 0;
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	long len;
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);

	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	len = ready_cbs.len;
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}
	WARN_ON_ONCE(ready_cbs.len);

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_add_len(&sdp->srcu_cblist, -len);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	unsigned long curdelay;
	unsigned long j;
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	curdelay = srcu_get_delay(ssp);
	if (curdelay) {
		WRITE_ONCE(ssp->reschedule_count, 0);
	} else {
		j = jiffies;
		if (READ_ONCE(ssp->reschedule_jiffies) == j) {
			WRITE_ONCE(ssp->reschedule_count, READ_ONCE(ssp->reschedule_count) + 1);
			if (READ_ONCE(ssp->reschedule_count) > srcu_max_nodelay)
				curdelay = 1;
		} else {
			WRITE_ONCE(ssp->reschedule_count, 1);
			WRITE_ONCE(ssp->reschedule_jiffies, j);
		}
	}
	srcu_reschedule(ssp, curdelay);
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

static const char * const srcu_size_state_name[] = {
	"SRCU_SIZE_SMALL",
	"SRCU_SIZE_ALLOC",
	"SRCU_SIZE_WAIT_BARRIER",
	"SRCU_SIZE_WAIT_CALL",
	"SRCU_SIZE_WAIT_CBS1",
	"SRCU_SIZE_WAIT_CBS2",
	"SRCU_SIZE_WAIT_CBS3",
	"SRCU_SIZE_WAIT_CBS4",
	"SRCU_SIZE_BIG",
	"SRCU_SIZE_???",
};

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;
	int ss_state = READ_ONCE(ssp->srcu_size_state);
	int ss_state_idx = ss_state;

	idx = ssp->srcu_idx & 0x1;
	if (ss_state < 0 || ss_state >= ARRAY_SIZE(srcu_size_state_name))
		ss_state_idx = ARRAY_SIZE(srcu_size_state_name) - 1;
	pr_alert("%s%s Tree SRCU g%ld state %d (%s)",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), ss_state,
		 srcu_size_state_name[ss_state_idx]);
	if (!ssp->sda) {
		/* The srcu_struct does not yet have per-CPU data. */
		pr_cont(" No per-CPU srcu_data structures (->sda == NULL).\n");
	} else {
		pr_cont(" per-CPU(idx=%d):", idx);
		for_each_possible_cpu(cpu) {
			unsigned long l0, l1;
			unsigned long u0, u1;
			long c0, c1;
			struct srcu_data *sdp;

			sdp = per_cpu_ptr(ssp->sda, cpu);
			u0 = data_race(sdp->srcu_unlock_count[!idx]);
			u1 = data_race(sdp->srcu_unlock_count[idx]);

			/*
			 * Make sure that a lock is always counted if the
			 * corresponding unlock is counted.
			 */
			smp_rmb();

			l0 = data_race(sdp->srcu_lock_count[!idx]);
			l1 = data_race(sdp->srcu_lock_count[idx]);

			c0 = l0 - u0;
			c1 = l1 - u1;
			pr_cont(" %d(%ld,%ld %c)",
				cpu, c0, c1,
				"C."[rcu_segcblist_empty(&sdp->srcu_cblist)]);
			s0 += c0;
			s1 += c1;
		}
		pr_cont(" T(%ld,%ld)\n", s0, s1);
	}
	if (SRCU_SIZING_IS_TORTURE())
		srcu_transition_to_big(ssp);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	if (srcu_retry_check_delay != SRCU_DEFAULT_RETRY_CHECK_DELAY)
		pr_info("\tNon-default retry check delay of %lu us.\n", srcu_retry_check_delay);
	if (srcu_max_nodelay != SRCU_DEFAULT_MAX_NODELAY)
		pr_info("\tNon-default max no-delay of %lu.\n", srcu_max_nodelay);
	pr_info("\tMax phase no-delay instances is %lu.\n", srcu_max_nodelay_phase);
	return 0;
}
early_initcall(srcu_bootup_announce);

void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	/* Decide on srcu_struct-size strategy. */
	if (SRCU_SIZING_IS(SRCU_SIZING_AUTO)) {
		if (nr_cpu_ids >= big_cpu_lim) {
			convert_to_big = SRCU_SIZING_INIT;
			pr_info("%s: Setting srcu_struct sizes to big.\n", __func__);
		} else {
			convert_to_big = SRCU_SIZING_NONE | SRCU_SIZING_CONTEND;
			pr_info("%s: Setting srcu_struct sizes based on contention.\n", __func__);
		}
	}

	/*
	 * Once that is set, call_srcu() can follow the normal path and
	 * queue delayed work.  This must follow RCU workqueues creation
	 * and timers initialization.
	 */
	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		list_del_init(&ssp->work.work.entry);
		if (SRCU_SIZING_IS(SRCU_SIZING_INIT) && ssp->srcu_size_state == SRCU_SIZE_SMALL)
			ssp->srcu_size_state = SRCU_SIZE_ALLOC;
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}

#ifdef CONFIG_MODULES

/* Initialize any global-scope srcu_struct structures used by this module. */
static int srcu_module_coming(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;
	int ret;

	for (i = 0; i < mod->num_srcu_structs; i++) {
		ret = init_srcu_struct(*(sspp++));
		if (WARN_ON_ONCE(ret))
			return ret;
	}
	return 0;
}

/* Clean up any global-scope srcu_struct structures used by this module. */
static void srcu_module_going(struct module *mod)
{
	int i;
	struct srcu_struct **sspp = mod->srcu_struct_ptrs;

	for (i = 0; i < mod->num_srcu_structs; i++)
		cleanup_srcu_struct(*(sspp++));
}

/* Handle one module, either coming or going. */
static int srcu_module_notify(struct notifier_block *self,
			      unsigned long val, void *data)
{
	struct module *mod = data;
	int ret = 0;

	switch (val) {
	case MODULE_STATE_COMING:
		ret = srcu_module_coming(mod);
		break;
	case MODULE_STATE_GOING:
		srcu_module_going(mod);
		break;
	default:
		break;
	}
	return ret;
}

static struct notifier_block srcu_module_nb = {
	.notifier_call = srcu_module_notify,
	.priority = 0,
};

static __init int init_srcu_module_notifier(void)
{
	int ret;

	ret = register_module_notifier(&srcu_module_nb);
	if (ret)
		pr_warn("Failed to register srcu module notifier\n");
	return ret;
}
late_initcall(init_srcu_module_notifier);

#endif /* #ifdef CONFIG_MODULES */