/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, you can access it online at
 * http://www.gnu.org/licenses/gpl-2.0.html.
 *
 * Copyright (C) IBM Corporation, 2006
 * Copyright (C) Fujitsu, 2017
 *
 * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 *	    Lai Jiangshan <laijs@cn.fujitsu.com>
 *
 * For detailed explanation of the Read-Copy Update mechanism see
 * Documentation/RCU/.
 */
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

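/*
 * For example (a sketch, assuming TREE SRCU is built in so that the
 * parameter takes the "srcutree." prefix), auto-expediting can be
 * disabled by zeroing the holdoff on the kernel command line:
 *
 *	srcutree.exp_holdoff=0
 */
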
/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);

/*
 * Initialize SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them, so they cannot be simply
 * selectively initialized.
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.  Note that the ARRAY_SIZE()
	 * expressions are compile-time constants, so sdp is not
	 * dereferenced here.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

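/*
 * A minimal lifecycle sketch for users of this API (the srcu_struct and
 * function names below are hypothetical, not part of this file):
 *
 *	static struct srcu_struct my_srcu;
 *
 *	static int __init my_driver_init(void)
 *	{
 *		return init_srcu_struct(&my_srcu);
 *	}
 *
 *	static void __exit my_driver_exit(void)
 *	{
 *		cleanup_srcu_struct(&my_srcu);
 *	}
 *
 * Statically allocated domains can instead be declared with
 * DEFINE_SRCU() or DEFINE_STATIC_SRCU(), which need no explicit
 * init_srcu_struct() call.
 */
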
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use sp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed)))
		return; /* Already initialized. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some time in between.  This does
	 * not mean that there are no more readers, as one could have read
	 * the current index but not have incremented the lock counter yet.
	 *
	 * So suppose that the updater is preempted here for so long
	 * that more than ULONG_MAX non-nested readers come and go in
	 * the meantime.  It turns out that this cannot result in overflow
	 * because if a reader modifies its unlock count after we read it
	 * above, then that reader's next load of ->srcu_idx is guaranteed
	 * to get the new value, which will cause it to operate on the
	 * other bank of counters, where it cannot contribute to the
	 * overflow of these counters.  This means that there is a maximum
	 * of 2*NR_CPUS increments, which cannot overflow given current
	 * systems, especially not on 64-bit systems.
	 *
	 * OK, how about nesting?  This does impose a limit on nesting
	 * of floor(ULONG_MAX/NR_CPUS/2), which should be sufficient,
	 * especially on 64-bit systems.
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1

/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Expedited grace period in flight, just leak it. */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Leakage unless caller handles error. */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

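/*
 * Reader-side usage sketch (not part of this file; "my_srcu", "gp", and
 * "do_something_with" are hypothetical).  Callers normally use the
 * srcu_read_lock()/srcu_read_unlock() wrappers together with
 * srcu_dereference() rather than the double-underscore functions above:
 *
 *	int idx;
 *	struct foo *p;
 *
 *	idx = srcu_read_lock(&my_srcu);
 *	p = srcu_dereference(gp, &my_srcu);
 *	if (p)
 *		do_something_with(p);
 *	srcu_read_unlock(&my_srcu, idx);
 */
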
/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after a few microseconds,
 * we repeatedly block for 1-millisecond time periods.
 */
#define SRCU_RETRY_CHECK_DELAY		5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&sp->lock);
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	raw_spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	raw_spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_irq_rcu_node(snp);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		raw_spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check))
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				raw_spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	raw_spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		/* Throttle expedited grace periods: avoid CPU hogging. */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		raw_spin_unlock_irq_rcu_node(sp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	/* Advance (never retreat) ->srcu_gp_seq_needed_exp to s. */
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pair with load
		 * acquire setting up for initialization.
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s);
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period from idle to specify expediting because they will all end
 * up requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.  Furthermore, the wakeup
 * can happen before the callback is fully removed, so we have no choice
 * but to accept this sort of imprecision.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preceded by a (wild guess) billion grace
 * periods, who cares?  The consequence of a mistaken non-idle verdict
 * is merely a normal (non-expedited) grace period, and the consequence
 * of a mistaken idle verdict is merely one needlessly expedited grace
 * period, neither of which affects correctness.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 * Note that these guarantees include CPUs that are offline, idle, or
 * executing in user mode, as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked call_srcu() and CPU B invoked the
 * resulting SRCU callback function "func()", then both CPU A and CPU
 * B are guaranteed to execute a full memory barrier during the time
 * interval between the call to call_srcu() and the invocation of "func()".
 * This guarantee applies even if CPU A and CPU B are the same CPU (but
 * again only if the system has more than one CPU).
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	raw_spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);

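/*
 * A minimal call_srcu() sketch (the types and names are hypothetical,
 * not part of this file), assuming the rcu_head is embedded in the
 * SRCU-protected structure:
 *
 *	struct foo {
 *		struct rcu_head rh;
 *		int data;
 *	};
 *
 *	static void free_foo_cb(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct foo, rh));
 *	}
 *
 * Then, after making old_p unreachable to new readers:
 *
 *	call_srcu(&my_srcu, &old_p->rh, free_foo_cb);
 */
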
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first,
 * and then flip the srcu_idx and wait for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  In addition,
 * each CPU having an SRCU read-side critical section that extends beyond
 * the return from synchronize_srcu() is guaranteed to have executed a
 * full memory barrier after the beginning of synchronize_srcu() and before
 * the beginning of that SRCU read-side critical section.  Note that these
 * guarantees include CPUs that are offline, idle, or executing in user mode,
 * as well as CPUs that are executing in the kernel.
 *
 * Furthermore, if CPU A invoked synchronize_srcu(), which returned
 * to its caller on CPU B, then both CPU A and CPU B are guaranteed
 * to have executed a full memory barrier during the execution of
 * synchronize_srcu().  This guarantee applies even if CPU A and CPU B
 * are the same CPU, but again only if the system has more than one CPU.
 *
 * Of course, these memory-ordering guarantees apply only when
 * synchronize_srcu(), srcu_read_lock(), and srcu_read_unlock() are
 * passed the same srcu_struct structure.
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);

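/*
 * A typical updater-side sketch (all names hypothetical; "gp" is a
 * pointer published to SRCU readers and "gp_lock" serializes updaters):
 * publish the new version, wait for pre-existing readers, then free the
 * old version.
 *
 *	struct foo *new_p, *old_p;
 *
 *	spin_lock(&gp_lock);
 *	old_p = rcu_dereference_protected(gp, lockdep_is_held(&gp_lock));
 *	rcu_assign_pointer(gp, new_p);
 *	spin_unlock(&gp_lock);
 *	synchronize_srcu(&my_srcu);
 *	kfree(old_p);
 */
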
/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		raw_spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);

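/*
 * Teardown-ordering sketch (hypothetical caller code, not part of this
 * file): stop posting new callbacks, wait for those already posted, and
 * only then deconstruct the SRCU domain.
 *
 *	stop_posting_new_srcu_callbacks();	(hypothetical helper)
 *	srcu_barrier(&my_srcu);
 *	cleanup_srcu_struct(&my_srcu);
 */
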
/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core of the SRCU grace-period state machine.  Push state bits of
 * ->srcu_gp_seq to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan2
 * completes the grace period.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq));
	if (idx == SRCU_STATE_IDLE) {
		raw_spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			raw_spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		raw_spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	raw_spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	raw_spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	raw_spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	raw_spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}
EXPORT_SYMBOL_GPL(process_srcu);

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);