/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tree-based ("Tree SRCU") variant.
 */
#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly says every 2**N grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay);
static void process_srcu(struct work_struct *work);

/*
 * Initialize the per-node combining tree.  Note that statically
 * allocated srcu_struct structures might already have readers running
 * against them, so this must work on a structure that is otherwise
 * in use.
 */
static void init_srcu_struct_nodes(struct srcu_struct *sp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the overall tree geometry. */
	sp->level[0] = &sp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		sp->level[i] = sp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &sp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == sp->level[level + 1])
			level++;
		snp->srcu_parent = sp->level[level - 1] +
				   (snp - sp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = sp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = sp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = sp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_DELAYED_WORK(&sdp->work, srcu_invoke_callbacks);
		sdp->sp = sp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}

/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */
static int init_srcu_struct_fields(struct srcu_struct *sp, bool is_static)
{
	mutex_init(&sp->srcu_cb_mutex);
	mutex_init(&sp->srcu_gp_mutex);
	sp->srcu_idx = 0;
	sp->srcu_gp_seq = 0;
	sp->srcu_barrier_seq = 0;
	mutex_init(&sp->srcu_barrier_mutex);
	atomic_set(&sp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&sp->work, process_srcu);
	if (!is_static)
		sp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(sp, is_static);
	sp->srcu_gp_seq_needed_exp = 0;
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&sp->srcu_gp_seq_needed, 0); /* Init done. */
	return sp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *sp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)sp, sizeof(*sp));
	lockdep_init_map(&sp->dep_map, name, key, 0);
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @sp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *sp)
{
	raw_spin_lock_init(&ACCESS_PRIVATE(sp, lock));
	return init_srcu_struct_fields(sp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/*
 * First-use initialization of statically allocated srcu_struct
 * structures.  Wiring up the combining tree is more than can be done
 * with compile-time initialization, so this check is invoked from each
 * update-side SRCU primitive.  Use sp->lock, which -is- compile-time
 * initialized, to resolve races between multiple CPUs trying to garner
 * first-use privileges.
 */
static void check_init_srcu_struct(struct srcu_struct *sp)
{
	unsigned long flags;

	WARN_ON_ONCE(rcu_scheduler_active == RCU_SCHEDULER_INIT);
	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq_needed)))
		return; /* Already initialized. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (!rcu_seq_state(sp->srcu_gp_seq_needed)) {
		raw_spin_unlock_irqrestore_rcu_node(sp, flags);
		return;
	}
	init_srcu_struct_fields(sp, true);
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_lock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}

/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *sp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}

/*
 * Return true if the number of pre-existing readers is determined to
 * be zero for the rank of per-CPU counters specified by idx.
 */
static bool srcu_readers_active_idx_check(struct srcu_struct *sp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(sp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  This needs to be a full smp_mb() because
	 * the read side may contain a read from a variable that is
	 * written before the synchronize_srcu() on the write side, so
	 * the barriers here and in the read-side primitives act like
	 * the store-buffering pattern.
	 */
	smp_mb();

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some point during the sums.
	 * New readers that sampled ->srcu_idx before the flip use the
	 * other index, so they cannot extend this grace period; counter
	 * wrap is headed off by the counter_wrap_check work in
	 * srcu_gp_end().
	 */
	return srcu_readers_lock_idx(sp, idx) == unlocks;
}

/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @sp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */
static bool srcu_readers_active(struct srcu_struct *sp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(sp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL 1

/*
 * Return grace-period delay: zero if there are expedited grace periods
 * pending, SRCU_INTERVAL otherwise.
 */
static unsigned long srcu_get_delay(struct srcu_struct *sp)
{
	if (ULONG_CMP_LT(READ_ONCE(sp->srcu_gp_seq),
			 READ_ONCE(sp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}

/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @sp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */
void cleanup_srcu_struct(struct srcu_struct *sp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(sp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(sp)))
		return; /* Just leak it! */
	flush_delayed_work(&sp->work);
	for_each_possible_cpu(cpu)
		flush_delayed_work(&per_cpu_ptr(sp->sda, cpu)->work);
	if (WARN_ON(rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(sp))) {
		pr_info("cleanup_srcu_struct: Active srcu_struct %p state: %d\n", sp, rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(sp->sda);
	sp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);

/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */
int __srcu_read_lock(struct srcu_struct *sp)
{
	int idx;

	idx = READ_ONCE(sp->srcu_idx) & 0x1;
	this_cpu_inc(sp->sda->srcu_lock_count[idx]);
	smp_mb(); /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);

/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */
void __srcu_read_unlock(struct srcu_struct *sp, int idx)
{
	smp_mb(); /* Avoid leaking the critical section. */
	this_cpu_inc(sp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);
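
/*
 * Illustrative sketch, not part of the SRCU implementation: how a client
 * typically uses the reader-side wrappers built on __srcu_read_lock() and
 * __srcu_read_unlock() above.  The names example_srcu, struct foo,
 * example_ptr, and example_reader() are hypothetical and exist only for
 * this sketch.
 */
#if 0	/* Usage sketch only; not compiled with this file. */
DEFINE_SRCU(example_srcu);
struct foo { int val; };
static struct foo __rcu *example_ptr;

static int example_reader(void)
{
	int idx;
	int val = -1;
	struct foo *p;

	idx = srcu_read_lock(&example_srcu);		/* Enter read-side section. */
	p = srcu_dereference(example_ptr, &example_srcu);
	if (p)
		val = p->val;				/* p stays valid until unlock. */
	srcu_read_unlock(&example_srcu, idx);		/* Pass back index from lock. */
	return val;
}
#endif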

/*
 * Delay, in microseconds, between successive scans of the reader
 * counters while waiting for pre-existing readers to complete.
 */
#define SRCU_RETRY_CHECK_DELAY 5

/*
 * Start an SRCU grace period.
 */
static void srcu_gp_start(struct srcu_struct *sp)
{
	struct srcu_data *sdp = this_cpu_ptr(sp->sda);
	int state;

	lockdep_assert_held(&sp->lock);
	WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&sp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

/*
 * Track online CPUs to guide callback workqueue placement.
 */
DEFINE_PER_CPU(bool, srcu_online);

void srcu_online_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), true);
}

void srcu_offline_cpu(unsigned int cpu)
{
	WRITE_ONCE(per_cpu(srcu_online, cpu), false);
}

/*
 * Place the workqueue handler on the specified CPU if online, otherwise
 * just run it wherever.  This is useful for placing workqueue handlers
 * that are to invoke the specified CPU's callbacks.
 */
static bool srcu_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
				       struct delayed_work *dwork,
				       unsigned long delay)
{
	bool ret;

	preempt_disable();
	if (READ_ONCE(per_cpu(srcu_online, cpu)))
		ret = queue_delayed_work_on(cpu, wq, dwork, delay);
	else
		ret = queue_delayed_work(wq, dwork, delay);
	preempt_enable();
	return ret;
}

/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp->cpu, system_power_efficient_wq,
				   &sdp->work, delay);
}

/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */
static void srcu_schedule_cbs_snp(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(sp->sda, cpu), delay);
	}
}

/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */
static void srcu_gp_end(struct srcu_struct *sp)
{
	unsigned long cbdelay;
	bool cbs;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	int idxnext;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&sp->srcu_cb_mutex);

	/* End the current grace period. */
	raw_spin_lock_irq_rcu_node(sp);
	idx = rcu_seq_state(sp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(sp);
	sp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&sp->srcu_gp_seq);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, gpseq))
		sp->srcu_gp_seq_needed_exp = gpseq;
	raw_spin_unlock_irq_rcu_node(sp);
	mutex_unlock(&sp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	idxnext = (idx + 1) % ARRAY_SIZE(snp->srcu_have_cbs);
	rcu_for_each_node_breadth_first(sp, snp) {
		raw_spin_lock_irq_rcu_node(snp);
		cbs = false;
		if (snp >= sp->level[rcu_num_lvls - 1])
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		raw_spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(sp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check))
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(sp->sda, cpu);
				raw_spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&sp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	raw_spin_lock_irq_rcu_node(sp);
	gpseq = rcu_seq_current(&sp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, sp->srcu_gp_seq_needed)) {
		srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		/* Throttle expedited grace periods: Should be rare! */
		srcu_reschedule(sp, rcu_seq_ctr(gpseq) & 0x3ff
				    ? 0 : SRCU_INTERVAL);
	} else {
		raw_spin_unlock_irq_rcu_node(sp);
	}
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */
static void srcu_funnel_exp_start(struct srcu_struct *sp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 */
static void srcu_funnel_gp_start(struct srcu_struct *sp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&sp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		raw_spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			raw_spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(sp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		raw_spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	raw_spin_lock_irqsave_rcu_node(sp, flags);
	if (ULONG_CMP_LT(sp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pairs with the
		 * load-acquire in check_init_srcu_struct() and
		 * srcu_advance_state().
		 */
		smp_store_release(&sp->srcu_gp_seq_needed, s);
	}
	if (!do_norm && ULONG_CMP_LT(sp->srcu_gp_seq_needed_exp, s))
		sp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&sp->srcu_gp_seq, s) &&
	    rcu_seq_state(sp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed));
		srcu_gp_start(sp);
		queue_delayed_work(system_power_efficient_wq, &sp->work,
				   srcu_get_delay(sp));
	}
	raw_spin_unlock_irqrestore_rcu_node(sp, flags);
}

/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */
static bool try_check_zero(struct srcu_struct *sp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(sp, idx))
			return true;
		if (--trycount + !srcu_get_delay(sp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}

/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */
static void srcu_flip(struct srcu_struct *sp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb();

	WRITE_ONCE(sp->srcu_idx, sp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb();
}

/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several current from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * This is a heuristic: it avoids acquiring locks to keep the common
 * case fast, so it can produce false positives and false negatives.
 * A false positive merely causes one needlessly expedited grace period,
 * and a false negative merely loses an opportunity to expedite, so
 * both outcomes are harmless.
 */
static bool srcu_might_be_idle(struct srcu_struct *sp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, sp->srcu_last_gp_end,
			       sp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&sp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(sp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&sp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}

/*
 * SRCU callback function to leak a callback.
 */
static void srcu_leak_callback(struct rcu_head *rhp)
{
}

/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct, initiating grace-period
 * processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  The callback is
 * invoked only after all pre-existing readers for the snapshotted
 * grace-period sequence number have completed.
 */
void __call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
		 rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(sp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	local_irq_save(flags);
	sdp = this_cpu_ptr(sp->sda);
	raw_spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	s = rcu_seq_snap(&sp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	raw_spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(sp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(sp, sdp->mynode, s);
}

/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @sp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 */
void call_srcu(struct srcu_struct *sp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(sp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
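
/*
 * Illustrative sketch, not part of this file: queueing an asynchronous
 * cleanup with call_srcu().  struct example_obj, example_free_cb(), and
 * example_retire() are hypothetical; the rcu_head must be embedded in
 * the object being freed, and the caller is assumed to have already
 * unpublished the object from reader-visible pointers.
 */
#if 0	/* Usage sketch only; not compiled with this file. */
struct example_obj {
	int data;
	struct rcu_head rh;
};

static void example_free_cb(struct rcu_head *rhp)
{
	struct example_obj *obj = container_of(rhp, struct example_obj, rh);

	kfree(obj);	/* Runs after all pre-existing SRCU readers finish. */
}

static void example_retire(struct srcu_struct *sp, struct example_obj *obj)
{
	call_srcu(sp, &obj->rh, example_free_cb);	/* Non-blocking. */
}
#endif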

/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */
static void __synchronize_srcu(struct srcu_struct *sp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&sp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(sp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(sp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the raw_spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}

/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */
void synchronize_srcu_expedited(struct srcu_struct *sp)
{
	__synchronize_srcu(sp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);

/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @sp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first, then flips
 * ->srcu_idx and waits for the count of the other index.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so Tree
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */
void synchronize_srcu(struct srcu_struct *sp)
{
	if (srcu_might_be_idle(sp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(sp);
	else
		__synchronize_srcu(sp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
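
/*
 * Illustrative sketch, not part of this file: the classic update-side
 * pattern of publishing a replacement, waiting for pre-existing readers
 * with synchronize_srcu(), and then freeing the old version.  The names
 * example_update(), example_ptr, example_srcu, example_mutex, and struct
 * foo are hypothetical, continuing the reader-side sketch shown earlier.
 */
#if 0	/* Usage sketch only; not compiled with this file. */
static void example_update(struct foo *newp)
{
	struct foo *oldp;

	/* example_mutex serializes updaters. */
	oldp = rcu_dereference_protected(example_ptr,
					 lockdep_is_held(&example_mutex));
	rcu_assign_pointer(example_ptr, newp);	/* Publish the new version. */
	synchronize_srcu(&example_srcu);	/* Wait for pre-existing readers. */
	kfree(oldp);				/* No reader can still hold oldp. */
}
#endif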

/*
 * Callback function for srcu_barrier() use.
 */
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	sp = sdp->sp;
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
}

/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @sp: srcu_struct on which to wait for in-flight callbacks.
 */
void srcu_barrier(struct srcu_struct *sp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&sp->srcu_barrier_seq);

	check_init_srcu_struct(sp);
	mutex_lock(&sp->srcu_barrier_mutex);
	if (rcu_seq_done(&sp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following a previous barrier. */
		mutex_unlock(&sp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&sp->srcu_barrier_seq);
	init_completion(&sp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&sp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop entrains the srcu_barrier_cb
	 * callback onto one CPU's callback list.  If that list turns out
	 * to be empty, there is nothing to wait for on that CPU, so the
	 * count is dropped back down.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(sp->sda, cpu);
		raw_spin_lock_irq_rcu_node(sdp);
		atomic_inc(&sp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&sp->srcu_barrier_cpu_cnt);
		}
		raw_spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&sp->srcu_barrier_cpu_cnt))
		complete(&sp->srcu_barrier_completion);
	wait_for_completion(&sp->srcu_barrier_completion);

	rcu_seq_end(&sp->srcu_barrier_seq);
	mutex_unlock(&sp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
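
/*
 * Illustrative sketch, not part of this file: a typical teardown sequence
 * for a dynamically initialized srcu_struct whose users have called
 * call_srcu().  srcu_barrier() must run after new callback posting has
 * stopped and before cleanup_srcu_struct(), so that no callbacks remain
 * queued.  example_dyn_srcu, example_init(), and example_exit() are
 * hypothetical names.
 */
#if 0	/* Usage sketch only; not compiled with this file. */
static struct srcu_struct example_dyn_srcu;

static int example_init(void)
{
	return init_srcu_struct(&example_dyn_srcu);
}

static void example_exit(void)
{
	/* Callers must have stopped posting call_srcu() callbacks by now. */
	srcu_barrier(&example_dyn_srcu);	/* Wait for queued callbacks. */
	cleanup_srcu_struct(&example_dyn_srcu);	/* Then release SRCU state. */
}
#endif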

/**
 * srcu_batches_completed - return batches completed.
 * @sp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */
unsigned long srcu_batches_completed(struct srcu_struct *sp)
{
	return sp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);

/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan2
 * completes the grace period.
 */
static void srcu_advance_state(struct srcu_struct *sp)
{
	int idx;

	mutex_lock(&sp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear from both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&sp->srcu_gp_seq));
	if (idx == SRCU_STATE_IDLE) {
		raw_spin_lock_irq_rcu_node(sp);
		if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq));
			raw_spin_unlock_irq_rcu_node(sp);
			mutex_unlock(&sp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(sp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(sp);
		raw_spin_unlock_irq_rcu_node(sp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 1)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(sp);
		rcu_seq_set_state(&sp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(sp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (sp->srcu_idx & 1);
		if (!try_check_zero(sp, idx, 2)) {
			mutex_unlock(&sp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(sp);  /* Releases ->srcu_gp_mutex. */
	}
}

/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *sp;

	sdp = container_of(work, struct srcu_data, work.work);
	sp = sdp->sp;
	rcu_cblist_init(&ready_cbs);
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&sp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		raw_spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	raw_spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	raw_spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&sp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	raw_spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}

/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */
static void srcu_reschedule(struct srcu_struct *sp, unsigned long delay)
{
	bool pushgp = true;

	raw_spin_lock_irq_rcu_node(sp);
	if (ULONG_CMP_GE(sp->srcu_gp_seq, sp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(sp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(sp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(sp);
	}
	raw_spin_unlock_irq_rcu_node(sp);

	if (pushgp)
		queue_delayed_work(system_power_efficient_wq, &sp->work, delay);
}

/*
 * This is the work-queue function that handles SRCU grace periods.
 */
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *sp;

	sp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(sp);
	srcu_reschedule(sp, srcu_get_delay(sp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *sp, int *flags,
			     unsigned long *gpnum, unsigned long *completed)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*completed = rcu_seq_ctr(sp->srcu_gp_seq);
	*gpnum = rcu_seq_ctr(sp->srcu_gp_seq_needed);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *sp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = sp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU per-CPU(idx=%d):", tt, tf, idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *counts;

		counts = per_cpu_ptr(sp->sda, cpu);
		u0 = counts->srcu_unlock_count[!idx];
		u1 = counts->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the
		 * corresponding unlock is counted.
		 */
		smp_rmb();

		l0 = counts->srcu_lock_count[!idx];
		l1 = counts->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld)", cpu, c0, c1);
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);