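/*
 * Sleepable Read-Copy Update mechanism for mutual exclusion,
 * tree variant.
 *
 * For a detailed explanation of the Read-Copy Update mechanism, see
 * the documentation under Documentation/RCU/.
 */
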
#define pr_fmt(fmt) "rcu: " fmt

#include <linux/export.h>
#include <linux/mutex.h>
#include <linux/percpu.h>
#include <linux/preempt.h>
#include <linux/rcupdate_wait.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/delay.h>
#include <linux/module.h>
#include <linux/srcu.h>

#include "rcu.h"
#include "rcu_segcblist.h"

/* Holdoff in nanoseconds for auto-expediting. */
#define DEFAULT_SRCU_EXP_HOLDOFF (25 * 1000)
static ulong exp_holdoff = DEFAULT_SRCU_EXP_HOLDOFF;
module_param(exp_holdoff, ulong, 0444);

/* Overflow-check frequency.  N bits roughly every 2**(N+2) grace periods. */
static ulong counter_wrap_check = (ULONG_MAX >> 2);
module_param(counter_wrap_check, ulong, 0444);

/* List of srcu_struct structures with early-boot grace-period work pending. */
static LIST_HEAD(srcu_boot_list);
static bool __read_mostly srcu_init_done;

static void srcu_invoke_callbacks(struct work_struct *work);
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay);
static void process_srcu(struct work_struct *work);
static void srcu_delay_timer(struct timer_list *t);

/*
 * Wrappers for acquiring and releasing the ->lock of srcu_struct,
 * srcu_node, and srcu_data structures.  The smp_mb__after_unlock_lock()
 * provides full ordering against prior critical sections.
 */
#define spin_lock_rcu_node(p)						\
do {									\
	spin_lock(&ACCESS_PRIVATE(p, lock));				\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_rcu_node(p) spin_unlock(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irq_rcu_node(p)					\
do {									\
	spin_lock_irq(&ACCESS_PRIVATE(p, lock));			\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irq_rcu_node(p)					\
	spin_unlock_irq(&ACCESS_PRIVATE(p, lock))

#define spin_lock_irqsave_rcu_node(p, flags)				\
do {									\
	spin_lock_irqsave(&ACCESS_PRIVATE(p, lock), flags);		\
	smp_mb__after_unlock_lock();					\
} while (0)

#define spin_unlock_irqrestore_rcu_node(p, flags)			\
	spin_unlock_irqrestore(&ACCESS_PRIVATE(p, lock), flags)
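
/*
 * Initialize the SRCU combining tree.  Note that statically allocated
 * srcu_struct structures might already have srcu_read_lock() and
 * srcu_read_unlock() running against them, which is why the is_static
 * parameter suppresses reinitialization of the per-CPU lock/unlock counts.
 */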
static void init_srcu_struct_nodes(struct srcu_struct *ssp, bool is_static)
{
	int cpu;
	int i;
	int level = 0;
	int levelspread[RCU_NUM_LVLS];
	struct srcu_data *sdp;
	struct srcu_node *snp;
	struct srcu_node *snp_first;

	/* Work out the per-level fanout. */
	ssp->level[0] = &ssp->node[0];
	for (i = 1; i < rcu_num_lvls; i++)
		ssp->level[i] = ssp->level[i - 1] + num_rcu_lvl[i - 1];
	rcu_init_levelspread(levelspread, num_rcu_lvl);

	/* Each pass through this loop initializes one srcu_node structure. */
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_init(&ACCESS_PRIVATE(snp, lock));
		WARN_ON_ONCE(ARRAY_SIZE(snp->srcu_have_cbs) !=
			     ARRAY_SIZE(snp->srcu_data_have_cbs));
		for (i = 0; i < ARRAY_SIZE(snp->srcu_have_cbs); i++) {
			snp->srcu_have_cbs[i] = 0;
			snp->srcu_data_have_cbs[i] = 0;
		}
		snp->srcu_gp_seq_needed_exp = 0;
		snp->grplo = -1;
		snp->grphi = -1;
		if (snp == &ssp->node[0]) {
			/* Root node, special case. */
			snp->srcu_parent = NULL;
			continue;
		}

		/* Non-root node. */
		if (snp == ssp->level[level + 1])
			level++;
		snp->srcu_parent = ssp->level[level - 1] +
				   (snp - ssp->level[level]) /
				   levelspread[level - 1];
	}

	/*
	 * Initialize the per-CPU srcu_data array, which feeds into the
	 * leaves of the srcu_node tree.  Note that ARRAY_SIZE() is
	 * evaluated at compile time, so sdp need not yet point anywhere.
	 */
	WARN_ON_ONCE(ARRAY_SIZE(sdp->srcu_lock_count) !=
		     ARRAY_SIZE(sdp->srcu_unlock_count));
	level = rcu_num_lvls - 1;
	snp_first = ssp->level[level];
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_init(&ACCESS_PRIVATE(sdp, lock));
		rcu_segcblist_init(&sdp->srcu_cblist);
		sdp->srcu_cblist_invoking = false;
		sdp->srcu_gp_seq_needed = ssp->srcu_gp_seq;
		sdp->srcu_gp_seq_needed_exp = ssp->srcu_gp_seq;
		sdp->mynode = &snp_first[cpu / levelspread[level]];
		for (snp = sdp->mynode; snp != NULL; snp = snp->srcu_parent) {
			if (snp->grplo < 0)
				snp->grplo = cpu;
			snp->grphi = cpu;
		}
		sdp->cpu = cpu;
		INIT_WORK(&sdp->work, srcu_invoke_callbacks);
		timer_setup(&sdp->delay_work, srcu_delay_timer, 0);
		sdp->ssp = ssp;
		sdp->grpmask = 1 << (cpu - sdp->mynode->grplo);
		if (is_static)
			continue;

		/* Dynamically allocated, better be no srcu_read_locks()! */
		for (i = 0; i < ARRAY_SIZE(sdp->srcu_lock_count); i++) {
			sdp->srcu_lock_count[i] = 0;
			sdp->srcu_unlock_count[i] = 0;
		}
	}
}
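
/*
 * Initialize non-compile-time initialized fields, including the
 * associated srcu_node and srcu_data structures.  The is_static
 * parameter is passed through to init_srcu_struct_nodes(), and
 * also tells us that ->sda has already been wired up to srcu_data.
 */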
static int init_srcu_struct_fields(struct srcu_struct *ssp, bool is_static)
{
	mutex_init(&ssp->srcu_cb_mutex);
	mutex_init(&ssp->srcu_gp_mutex);
	ssp->srcu_idx = 0;
	ssp->srcu_gp_seq = 0;
	ssp->srcu_barrier_seq = 0;
	mutex_init(&ssp->srcu_barrier_mutex);
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 0);
	INIT_DELAYED_WORK(&ssp->work, process_srcu);
	if (!is_static)
		ssp->sda = alloc_percpu(struct srcu_data);
	init_srcu_struct_nodes(ssp, is_static);
	ssp->srcu_gp_seq_needed_exp = 0;
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	smp_store_release(&ssp->srcu_gp_seq_needed, 0); /* Init done. */
	return ssp->sda ? 0 : -ENOMEM;
}

#ifdef CONFIG_DEBUG_LOCK_ALLOC

int __init_srcu_struct(struct srcu_struct *ssp, const char *name,
		       struct lock_class_key *key)
{
	/* Don't re-initialize a lock while it is held. */
	debug_check_no_locks_freed((void *)ssp, sizeof(*ssp));
	lockdep_init_map(&ssp->dep_map, name, key, 0);
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(__init_srcu_struct);

#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

/**
 * init_srcu_struct - initialize a sleep-RCU structure
 * @ssp: structure to initialize.
 *
 * Must invoke this on a given srcu_struct before passing that srcu_struct
 * to any other function.  Each srcu_struct represents a separate domain
 * of SRCU protection.
 */
int init_srcu_struct(struct srcu_struct *ssp)
{
	spin_lock_init(&ACCESS_PRIVATE(ssp, lock));
	return init_srcu_struct_fields(ssp, false);
}
EXPORT_SYMBOL_GPL(init_srcu_struct);

#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
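
/*
 * First-use initialization of statically allocated srcu_struct
 * structure.  Wiring up the combining tree is more than can be
 * done with compile-time initialization, so this check is added
 * to each update-side SRCU primitive.  Use ssp->lock, which -is-
 * compile-time initialized, to resolve races involving multiple
 * CPUs trying to garner first-use privileges.
 */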
static void check_init_srcu_struct(struct srcu_struct *ssp)
{
	unsigned long flags;

	/* The smp_load_acquire() pairs with the smp_store_release(). */
	if (!rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq_needed)))
		return; /* Already initialized. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (!rcu_seq_state(ssp->srcu_gp_seq_needed)) {
		spin_unlock_irqrestore_rcu_node(ssp, flags);
		return;
	}
	init_srcu_struct_fields(ssp, true);
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
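
/*
 * Returns approximate total of the readers' ->srcu_lock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */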
static unsigned long srcu_readers_lock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[idx]);
	}
	return sum;
}
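
/*
 * Returns approximate total of the readers' ->srcu_unlock_count[] values
 * for the rank of per-CPU counters specified by idx.
 */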
static unsigned long srcu_readers_unlock_idx(struct srcu_struct *ssp, int idx)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_unlock_count[idx]);
	}
	return sum;
}
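
/*
 * Return true if the number of pre-existing readers is determined to
 * be zero.
 */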
static bool srcu_readers_active_idx_check(struct srcu_struct *ssp, int idx)
{
	unsigned long unlocks;

	unlocks = srcu_readers_unlock_idx(ssp, idx);

	/*
	 * Make sure that a lock is always counted if the corresponding
	 * unlock is counted.  Needs to be a smp_mb() as the read side may
	 * contain a read from a variable that is written to before the
	 * synchronize_srcu() in the write side.  In this case smp_mb()s
	 * A and B act like the store buffering pattern.
	 *
	 * This smp_mb() also pairs with smp_mb() C to prevent accesses
	 * after the synchronize_srcu() from being executed before the
	 * grace period ends.
	 */
	smp_mb(); /* A */

	/*
	 * If the locks are the same as the unlocks, then there must have
	 * been no readers on this index at some point in this function.
	 * But there might be more readers, as a task might have read
	 * the current ->srcu_idx but not yet have incremented its CPU's
	 * ->srcu_lock_count[idx] counter.  In fact, it is possible
	 * that most of the tasks have been preempted between fetching
	 * ->srcu_idx and incrementing ->srcu_lock_count[idx].  Even if
	 * such a task were to be preempted for so long that more than
	 * ULONG_MAX non-nested readers came and went in the meantime,
	 * overflow still cannot happen: once a reader increments its
	 * unlock count after we read it above, that reader's next load
	 * of ->srcu_idx is guaranteed to get the new value, sending it
	 * to the other rank of counters, where it cannot contribute to
	 * overflow of this one.  This bounds the error at a maximum of
	 * 2*NR_CPUS increments, which cannot overflow on current systems,
	 * especially not on 64-bit systems.  Nesting is likewise bounded,
	 * at floor(ULONG_MAX/NR_CPUS/2), which should be sufficient.
	 */
	return srcu_readers_lock_idx(ssp, idx) == unlocks;
}
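
/**
 * srcu_readers_active - returns true if there are readers, and false
 *                       otherwise
 * @ssp: which srcu_struct to count active readers (holding srcu_read_lock).
 *
 * Note that this is not an atomic primitive, and can therefore suffer
 * severe errors when invoked on an active srcu_struct.  That said, it
 * can be useful as an error check at cleanup time.
 */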
static bool srcu_readers_active(struct srcu_struct *ssp)
{
	int cpu;
	unsigned long sum = 0;

	for_each_possible_cpu(cpu) {
		struct srcu_data *cpuc = per_cpu_ptr(ssp->sda, cpu);

		sum += READ_ONCE(cpuc->srcu_lock_count[0]);
		sum += READ_ONCE(cpuc->srcu_lock_count[1]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[0]);
		sum -= READ_ONCE(cpuc->srcu_unlock_count[1]);
	}
	return sum;
}

#define SRCU_INTERVAL		1
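
/*
 * Return grace-period delay, zero if there are expedited grace
 * periods pending, SRCU_INTERVAL otherwise.
 */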
static unsigned long srcu_get_delay(struct srcu_struct *ssp)
{
	if (ULONG_CMP_LT(READ_ONCE(ssp->srcu_gp_seq),
			 READ_ONCE(ssp->srcu_gp_seq_needed_exp)))
		return 0;
	return SRCU_INTERVAL;
}
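
/**
 * cleanup_srcu_struct - deconstruct a sleep-RCU structure
 * @ssp: structure to clean up.
 *
 * Must invoke this after you are finished using a given srcu_struct that
 * was initialized via init_srcu_struct(), else you leak memory.
 */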
void cleanup_srcu_struct(struct srcu_struct *ssp)
{
	int cpu;

	if (WARN_ON(!srcu_get_delay(ssp)))
		return; /* Just leak it! */
	if (WARN_ON(srcu_readers_active(ssp)))
		return; /* Just leak it! */
	flush_delayed_work(&ssp->work);
	for_each_possible_cpu(cpu) {
		struct srcu_data *sdp = per_cpu_ptr(ssp->sda, cpu);

		del_timer_sync(&sdp->delay_work);
		flush_work(&sdp->work);
		if (WARN_ON(rcu_segcblist_n_cbs(&sdp->srcu_cblist)))
			return; /* Forgot srcu_barrier(), so just leak it! */
	}
	if (WARN_ON(rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) != SRCU_STATE_IDLE) ||
	    WARN_ON(srcu_readers_active(ssp))) {
		pr_info("%s: Active srcu_struct %p state: %d\n",
			__func__, ssp, rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)));
		return; /* Caller forgot to stop doing call_srcu()? */
	}
	free_percpu(ssp->sda);
	ssp->sda = NULL;
}
EXPORT_SYMBOL_GPL(cleanup_srcu_struct);
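
/*
 * Counts the new reader in the appropriate per-CPU element of the
 * srcu_struct.
 * Returns an index that must be passed to the matching srcu_read_unlock().
 */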
int __srcu_read_lock(struct srcu_struct *ssp)
{
	int idx;

	idx = READ_ONCE(ssp->srcu_idx) & 0x1;
	this_cpu_inc(ssp->sda->srcu_lock_count[idx]);
	smp_mb(); /* B */  /* Avoid leaking the critical section. */
	return idx;
}
EXPORT_SYMBOL_GPL(__srcu_read_lock);
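
/*
 * Removes the count for the old reader from the appropriate per-CPU
 * element of the srcu_struct.  Note that this may well be a different
 * CPU than that which was incremented by the corresponding srcu_read_lock().
 */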
void __srcu_read_unlock(struct srcu_struct *ssp, int idx)
{
	smp_mb(); /* C */  /* Avoid leaking the critical section. */
	this_cpu_inc(ssp->sda->srcu_unlock_count[idx]);
}
EXPORT_SYMBOL_GPL(__srcu_read_unlock);

/*
 * We use an adaptive strategy for synchronize_srcu() and especially for
 * synchronize_srcu_expedited().  We spin for a fixed time period
 * (defined below) to allow SRCU readers to exit their read-side critical
 * sections.  If there are still some readers after that, the grace-period
 * machinery falls back to repeatedly rescheduling the workqueue.
 */
#define SRCU_RETRY_CHECK_DELAY		5
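
/*
 * Start an SRCU grace period.
 */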
static void srcu_gp_start(struct srcu_struct *ssp)
{
	struct srcu_data *sdp = this_cpu_ptr(ssp->sda);
	int state;

	lockdep_assert_held(&ACCESS_PRIVATE(ssp, lock));
	WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
	spin_lock_rcu_node(sdp);  /* Interrupts already disabled. */
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	spin_unlock_rcu_node(sdp);  /* Interrupts remain disabled. */
	smp_mb(); /* Order prior store to ->srcu_gp_seq_needed vs. GP start. */
	rcu_seq_start(&ssp->srcu_gp_seq);
	state = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
	WARN_ON_ONCE(state != SRCU_STATE_SCAN1);
}

static void srcu_delay_timer(struct timer_list *t)
{
	struct srcu_data *sdp = container_of(t, struct srcu_data, delay_work);

	queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
}

static void srcu_queue_delayed_work_on(struct srcu_data *sdp,
				       unsigned long delay)
{
	if (!delay) {
		queue_work_on(sdp->cpu, rcu_gp_wq, &sdp->work);
		return;
	}

	timer_reduce(&sdp->delay_work, jiffies + delay);
}
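
/*
 * Schedule callback invocation for the specified srcu_data structure,
 * if possible, on the corresponding CPU.
 */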
static void srcu_schedule_cbs_sdp(struct srcu_data *sdp, unsigned long delay)
{
	srcu_queue_delayed_work_on(sdp, delay);
}
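
/*
 * Schedule callback invocation for all srcu_data structures associated
 * with the specified srcu_node structure that have callbacks for the
 * just-completed grace period, the one corresponding to idx.  If possible,
 * schedule this invocation on the corresponding CPUs.
 */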
static void srcu_schedule_cbs_snp(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long mask, unsigned long delay)
{
	int cpu;

	for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
		if (!(mask & (1 << (cpu - snp->grplo))))
			continue;
		srcu_schedule_cbs_sdp(per_cpu_ptr(ssp->sda, cpu), delay);
	}
}
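
/*
 * Note the end of an SRCU grace period.  Initiates callback invocation
 * and starts a new grace period if needed.
 *
 * The ->srcu_cb_mutex acquisition does not protect any data, but
 * instead prevents more than one grace period from starting while we
 * are initiating callback invocation.  This allows the ->srcu_have_cbs[]
 * array to have a finite number of elements.
 */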
static void srcu_gp_end(struct srcu_struct *ssp)
{
	unsigned long cbdelay;
	bool cbs;
	bool last_lvl;
	int cpu;
	unsigned long flags;
	unsigned long gpseq;
	int idx;
	unsigned long mask;
	struct srcu_data *sdp;
	struct srcu_node *snp;

	/* Prevent more than one additional grace period. */
	mutex_lock(&ssp->srcu_cb_mutex);

	/* End the current grace period. */
	spin_lock_irq_rcu_node(ssp);
	idx = rcu_seq_state(ssp->srcu_gp_seq);
	WARN_ON_ONCE(idx != SRCU_STATE_SCAN2);
	cbdelay = srcu_get_delay(ssp);
	ssp->srcu_last_gp_end = ktime_get_mono_fast_ns();
	rcu_seq_end(&ssp->srcu_gp_seq);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, gpseq))
		ssp->srcu_gp_seq_needed_exp = gpseq;
	spin_unlock_irq_rcu_node(ssp);
	mutex_unlock(&ssp->srcu_gp_mutex);
	/* A new grace period can start at this point.  But only one. */

	/* Initiate callback invocation as needed. */
	idx = rcu_seq_ctr(gpseq) % ARRAY_SIZE(snp->srcu_have_cbs);
	srcu_for_each_node_breadth_first(ssp, snp) {
		spin_lock_irq_rcu_node(snp);
		cbs = false;
		last_lvl = snp >= ssp->level[rcu_num_lvls - 1];
		if (last_lvl)
			cbs = snp->srcu_have_cbs[idx] == gpseq;
		snp->srcu_have_cbs[idx] = gpseq;
		rcu_seq_set_state(&snp->srcu_have_cbs[idx], 1);
		if (ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, gpseq))
			snp->srcu_gp_seq_needed_exp = gpseq;
		mask = snp->srcu_data_have_cbs[idx];
		snp->srcu_data_have_cbs[idx] = 0;
		spin_unlock_irq_rcu_node(snp);
		if (cbs)
			srcu_schedule_cbs_snp(ssp, snp, mask, cbdelay);

		/* Occasionally prevent srcu_data counter wrap. */
		if (!(gpseq & counter_wrap_check) && last_lvl)
			for (cpu = snp->grplo; cpu <= snp->grphi; cpu++) {
				sdp = per_cpu_ptr(ssp->sda, cpu);
				spin_lock_irqsave_rcu_node(sdp, flags);
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed + 100))
					sdp->srcu_gp_seq_needed = gpseq;
				if (ULONG_CMP_GE(gpseq,
						 sdp->srcu_gp_seq_needed_exp + 100))
					sdp->srcu_gp_seq_needed_exp = gpseq;
				spin_unlock_irqrestore_rcu_node(sdp, flags);
			}
	}

	/* Callback initiation done, allow grace periods after next. */
	mutex_unlock(&ssp->srcu_cb_mutex);

	/* Start a new grace period if needed. */
	spin_lock_irq_rcu_node(ssp);
	gpseq = rcu_seq_current(&ssp->srcu_gp_seq);
	if (!rcu_seq_state(gpseq) &&
	    ULONG_CMP_LT(gpseq, ssp->srcu_gp_seq_needed)) {
		srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		srcu_reschedule(ssp, 0);
	} else {
		spin_unlock_irq_rcu_node(ssp);
	}
}
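
/*
 * Funnel-locking scheme to scalably mediate many concurrent expedited
 * grace-period requests.  This function is invoked for the first known
 * expedited request for a grace period that has already been requested,
 * but without expediting.  To start a completely new grace period,
 * whether expedited or not, use srcu_funnel_gp_start() instead.
 */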
static void srcu_funnel_exp_start(struct srcu_struct *ssp, struct srcu_node *snp,
				  unsigned long s)
{
	unsigned long flags;

	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) ||
		    ULONG_CMP_GE(READ_ONCE(snp->srcu_gp_seq_needed_exp), s))
			return;
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_gp_seq_needed_exp, s)) {
			spin_unlock_irqrestore_rcu_node(snp, flags);
			return;
		}
		WRITE_ONCE(snp->srcu_gp_seq_needed_exp, s);
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		ssp->srcu_gp_seq_needed_exp = s;
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
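
/*
 * Funnel-locking scheme to scalably mediate many concurrent grace-period
 * requests.  The winner has to do the work of actually starting grace
 * period s.  Losers must either ensure that their desired grace-period
 * number is recorded on at least their leaf srcu_node structure, or they
 * must take steps to invoke their own callbacks.
 *
 * Note that this function also does the work of srcu_funnel_exp_start(),
 * in some cases by directly invoking it.
 */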
static void srcu_funnel_gp_start(struct srcu_struct *ssp, struct srcu_data *sdp,
				 unsigned long s, bool do_norm)
{
	unsigned long flags;
	int idx = rcu_seq_ctr(s) % ARRAY_SIZE(sdp->mynode->srcu_have_cbs);
	struct srcu_node *snp = sdp->mynode;
	unsigned long snp_seq;

	/* Each pass through the loop does one level of the srcu_node tree. */
	for (; snp != NULL; snp = snp->srcu_parent) {
		if (rcu_seq_done(&ssp->srcu_gp_seq, s) && snp != sdp->mynode)
			return; /* GP already done and CBs recorded. */
		spin_lock_irqsave_rcu_node(snp, flags);
		if (ULONG_CMP_GE(snp->srcu_have_cbs[idx], s)) {
			snp_seq = snp->srcu_have_cbs[idx];
			if (snp == sdp->mynode && snp_seq == s)
				snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
			spin_unlock_irqrestore_rcu_node(snp, flags);
			if (snp == sdp->mynode && snp_seq != s) {
				srcu_schedule_cbs_sdp(sdp, do_norm
							   ? SRCU_INTERVAL
							   : 0);
				return;
			}
			if (!do_norm)
				srcu_funnel_exp_start(ssp, snp, s);
			return;
		}
		snp->srcu_have_cbs[idx] = s;
		if (snp == sdp->mynode)
			snp->srcu_data_have_cbs[idx] |= sdp->grpmask;
		if (!do_norm && ULONG_CMP_LT(snp->srcu_gp_seq_needed_exp, s))
			snp->srcu_gp_seq_needed_exp = s;
		spin_unlock_irqrestore_rcu_node(snp, flags);
	}

	/* Top of tree, must ensure the grace period will be started. */
	spin_lock_irqsave_rcu_node(ssp, flags);
	if (ULONG_CMP_LT(ssp->srcu_gp_seq_needed, s)) {
		/*
		 * Record need for grace period s.  Pairs with the
		 * load-acquire in srcu_advance_state().
		 */
		smp_store_release(&ssp->srcu_gp_seq_needed, s);
	}
	if (!do_norm && ULONG_CMP_LT(ssp->srcu_gp_seq_needed_exp, s))
		ssp->srcu_gp_seq_needed_exp = s;

	/* If grace period not already done and none in progress, start it. */
	if (!rcu_seq_done(&ssp->srcu_gp_seq, s) &&
	    rcu_seq_state(ssp->srcu_gp_seq) == SRCU_STATE_IDLE) {
		WARN_ON_ONCE(ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed));
		srcu_gp_start(ssp);
		if (likely(srcu_init_done))
			queue_delayed_work(rcu_gp_wq, &ssp->work,
					   srcu_get_delay(ssp));
		else if (list_empty(&ssp->work.work.entry))
			list_add(&ssp->work.work.entry, &srcu_boot_list);
	}
	spin_unlock_irqrestore_rcu_node(ssp, flags);
}
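
/*
 * Wait until all readers counted by array index idx complete, but
 * loop an additional time if there is an expedited grace period pending.
 * The caller must ensure that ->srcu_idx is not changed while checking.
 */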
static bool try_check_zero(struct srcu_struct *ssp, int idx, int trycount)
{
	for (;;) {
		if (srcu_readers_active_idx_check(ssp, idx))
			return true;
		if (--trycount + !srcu_get_delay(ssp) <= 0)
			return false;
		udelay(SRCU_RETRY_CHECK_DELAY);
	}
}
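
/*
 * Increment the ->srcu_idx counter so that future SRCU readers will
 * use the other rank of the ->srcu_(un)lock_count[] arrays.  This allows
 * us to wait for pre-existing readers in a starvation-free manner.
 */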
static void srcu_flip(struct srcu_struct *ssp)
{
	/*
	 * Ensure that if this updater saw a given reader's increment
	 * from __srcu_read_lock(), that reader was using an old value
	 * of ->srcu_idx.  Also ensure that if a given reader sees the
	 * new value of ->srcu_idx, this updater's earlier scans cannot
	 * have seen that reader's increments (which is OK, because this
	 * grace period need not wait on that reader).
	 */
	smp_mb(); /* E */  /* Pairs with B and C. */

	WRITE_ONCE(ssp->srcu_idx, ssp->srcu_idx + 1);

	/*
	 * Ensure that if the updater misses an __srcu_read_unlock()
	 * increment, that task's next __srcu_read_lock() will see the
	 * above counter update.  Note that both this memory barrier
	 * and the one in srcu_readers_active_idx_check() provide the
	 * guarantee for __srcu_read_lock().
	 */
	smp_mb(); /* D */  /* Pairs with C. */
}
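
/*
 * If SRCU is likely idle, return true, otherwise return false.
 *
 * Note that it is OK for several concurrent from-idle requests for a new
 * grace period to specify expediting, because they will all end up
 * requesting the same grace period anyhow.  So no loss.
 *
 * Note also that if any CPU (including the current one) is still invoking
 * callbacks, this function will nevertheless say "idle".  This is not
 * ideal, but the overhead of checking all CPUs' callback lists is even
 * less ideal, especially on large systems.
 *
 * This function is also subject to counter-wrap errors, but let's face
 * it, if this function was preempted for enough time for the counters
 * to wrap, it really doesn't matter whether or not we expedite the grace
 * period.  The extra overhead of a needlessly expedited grace period is
 * negligible when amortized over that time period.
 */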
static bool srcu_might_be_idle(struct srcu_struct *ssp)
{
	unsigned long curseq;
	unsigned long flags;
	struct srcu_data *sdp;
	unsigned long t;

	/* If the local srcu_data structure has callbacks, not idle. */
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	if (rcu_segcblist_pend_cbs(&sdp->srcu_cblist)) {
		local_irq_restore(flags);
		return false; /* Callbacks already present, so not idle. */
	}
	local_irq_restore(flags);

	/*
	 * No local callbacks, so probabilistically probe global state.
	 * Exact information would require acquiring locks, which would
	 * kill scalability, hence the probabilistic nature of the probe.
	 */

	/* First, see if enough time has passed since the last GP. */
	t = ktime_get_mono_fast_ns();
	if (exp_holdoff == 0 ||
	    time_in_range_open(t, ssp->srcu_last_gp_end,
			       ssp->srcu_last_gp_end + exp_holdoff))
		return false; /* Too soon after last GP. */

	/* Next, check for probable idleness. */
	curseq = rcu_seq_current(&ssp->srcu_gp_seq);
	smp_mb(); /* Order ->srcu_gp_seq with ->srcu_gp_seq_needed. */
	if (ULONG_CMP_LT(curseq, READ_ONCE(ssp->srcu_gp_seq_needed)))
		return false; /* Grace period in progress, so not idle. */
	smp_mb(); /* Order ->srcu_gp_seq with prior access. */
	if (curseq != rcu_seq_current(&ssp->srcu_gp_seq))
		return false; /* GP # changed, so not idle. */
	return true; /* With reasonable probability, idle! */
}
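
/*
 * SRCU callback function to leak a callback.
 */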
static void srcu_leak_callback(struct rcu_head *rhp)
{
}
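
/*
 * Enqueue an SRCU callback on the srcu_data structure associated with
 * the current CPU and the specified srcu_struct structure, initiating
 * grace-period processing if it is not already running.
 *
 * Note that all CPUs must agree that the grace period extended beyond
 * all pre-existing SRCU read-side critical sections.  On systems with
 * more than one CPU, this means that when "func()" is invoked, each CPU
 * is guaranteed to have executed a full memory barrier since the end of
 * its last corresponding SRCU read-side critical section whose beginning
 * preceded the call to call_srcu().  It also means that each CPU executing
 * an SRCU read-side critical section that continues beyond the start of
 * "func()" must have executed a memory barrier after the call_srcu()
 * but before the beginning of that SRCU read-side critical section.
 *
 * Of course, these guarantees apply only for invocations of call_srcu(),
 * srcu_read_lock(), and srcu_read_unlock() that are all passed the same
 * srcu_struct structure.
 */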
static void __call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
			rcu_callback_t func, bool do_norm)
{
	unsigned long flags;
	int idx;
	bool needexp = false;
	bool needgp = false;
	unsigned long s;
	struct srcu_data *sdp;

	check_init_srcu_struct(ssp);
	if (debug_rcu_head_queue(rhp)) {
		/* Probable double call_srcu(), so leak the callback. */
		WRITE_ONCE(rhp->func, srcu_leak_callback);
		WARN_ONCE(1, "call_srcu(): Leaked duplicate callback\n");
		return;
	}
	rhp->func = func;
	idx = srcu_read_lock(ssp);
	local_irq_save(flags);
	sdp = this_cpu_ptr(ssp->sda);
	spin_lock_rcu_node(sdp);
	rcu_segcblist_enqueue(&sdp->srcu_cblist, rhp, false);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	s = rcu_seq_snap(&ssp->srcu_gp_seq);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist, s);
	if (ULONG_CMP_LT(sdp->srcu_gp_seq_needed, s)) {
		sdp->srcu_gp_seq_needed = s;
		needgp = true;
	}
	if (!do_norm && ULONG_CMP_LT(sdp->srcu_gp_seq_needed_exp, s)) {
		sdp->srcu_gp_seq_needed_exp = s;
		needexp = true;
	}
	spin_unlock_irqrestore_rcu_node(sdp, flags);
	if (needgp)
		srcu_funnel_gp_start(ssp, sdp, s, do_norm);
	else if (needexp)
		srcu_funnel_exp_start(ssp, sdp->mynode, s);
	srcu_read_unlock(ssp, idx);
}
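
/**
 * call_srcu() - Queue a callback for invocation after an SRCU grace period
 * @ssp: srcu_struct in which to queue the callback
 * @rhp: structure to be used for queueing the SRCU callback.
 * @func: function to be invoked after the SRCU grace period
 *
 * The callback function will be invoked some time after a full SRCU
 * grace period elapses, in other words after all pre-existing SRCU
 * read-side critical sections have completed.  However, the callback
 * function might well execute concurrently with other SRCU read-side
 * critical sections that started after call_srcu() was invoked.  SRCU
 * read-side critical sections are delimited by srcu_read_lock() and
 * srcu_read_unlock(), and may be nested.
 *
 * The callback will be invoked from process context, but must nevertheless
 * be fast and must not block.
 *
 * A usage sketch, with hypothetical my_srcu/my_data/my_free names that
 * are not defined in this file:
 *
 *	struct my_data {
 *		struct rcu_head rh;
 *	};
 *
 *	static void my_free(struct rcu_head *rhp)
 *	{
 *		kfree(container_of(rhp, struct my_data, rh));
 *	}
 *
 *	// After unlinking p from all reader-visible structures:
 *	//	call_srcu(&my_srcu, &p->rh, my_free);
 */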
void call_srcu(struct srcu_struct *ssp, struct rcu_head *rhp,
	       rcu_callback_t func)
{
	__call_srcu(ssp, rhp, func, true);
}
EXPORT_SYMBOL_GPL(call_srcu);
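
/*
 * Helper function for synchronize_srcu() and synchronize_srcu_expedited().
 */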
static void __synchronize_srcu(struct srcu_struct *ssp, bool do_norm)
{
	struct rcu_synchronize rcu;

	RCU_LOCKDEP_WARN(lock_is_held(&ssp->dep_map) ||
			 lock_is_held(&rcu_bh_lock_map) ||
			 lock_is_held(&rcu_lock_map) ||
			 lock_is_held(&rcu_sched_lock_map),
			 "Illegal synchronize_srcu() in same-type SRCU (or in RCU) read-side critical section");

	if (rcu_scheduler_active == RCU_SCHEDULER_INACTIVE)
		return;
	might_sleep();
	check_init_srcu_struct(ssp);
	init_completion(&rcu.completion);
	init_rcu_head_on_stack(&rcu.head);
	__call_srcu(ssp, &rcu.head, wakeme_after_rcu, do_norm);
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);

	/*
	 * Make sure that later code is ordered after the SRCU grace
	 * period.  This pairs with the spin_lock_irq_rcu_node()
	 * in srcu_invoke_callbacks().  Unlike Tree RCU, this is needed
	 * because the current CPU might have been totally uninvolved with
	 * (and thus unordered against) that grace period.
	 */
	smp_mb();
}
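
/**
 * synchronize_srcu_expedited - Brute-force SRCU grace period
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for an SRCU grace period to elapse, but be more aggressive about
 * spinning rather than blocking when waiting.
 *
 * Note that synchronize_srcu_expedited() has the same deadlock and
 * memory-ordering properties as does synchronize_srcu().
 */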
void synchronize_srcu_expedited(struct srcu_struct *ssp)
{
	__synchronize_srcu(ssp, rcu_gp_is_normal());
}
EXPORT_SYMBOL_GPL(synchronize_srcu_expedited);
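
/**
 * synchronize_srcu - wait for prior SRCU read-side critical-section completion
 * @ssp: srcu_struct with which to synchronize.
 *
 * Wait for the count to drain to zero of both indexes.  To avoid the
 * possible starvation of synchronize_srcu(), it waits for the count of
 * the index=((->srcu_idx & 1) ^ 1) to drain to zero at first, and then
 * flips ->srcu_idx and waits for the count of the other index to drain.
 *
 * Can block; must be called from process context.
 *
 * Note that it is illegal to call synchronize_srcu() from the corresponding
 * SRCU read-side critical section; doing so will result in deadlock.
 * However, it is perfectly legal to call synchronize_srcu() on one
 * srcu_struct from some other srcu_struct's read-side critical section,
 * as long as the resulting graph of srcu_structs is acyclic.
 *
 * There are memory-ordering constraints implied by synchronize_srcu().
 * On systems with more than one CPU, when synchronize_srcu() returns,
 * each CPU is guaranteed to have executed a full memory barrier since
 * the end of its last corresponding SRCU read-side critical section
 * whose beginning preceded the call to synchronize_srcu().  Of course,
 * these guarantees apply only when synchronize_srcu(), srcu_read_lock(),
 * and srcu_read_unlock() are passed the same srcu_struct structure.
 *
 * A reader-side sketch, with a hypothetical my_srcu domain that is not
 * defined in this file:
 *
 *	int idx = srcu_read_lock(&my_srcu);
 *	// ... dereference SRCU-protected pointers, may sleep ...
 *	srcu_read_unlock(&my_srcu, idx);
 *
 * If SRCU is likely idle, expedite the first request.  This semantic
 * was provided by Classic SRCU, and is relied upon by its users, so TREE
 * SRCU must also provide it.  Note that detecting idleness is heuristic
 * and subject to both false positives and negatives.
 */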
void synchronize_srcu(struct srcu_struct *ssp)
{
	if (srcu_might_be_idle(ssp) || rcu_gp_is_expedited())
		synchronize_srcu_expedited(ssp);
	else
		__synchronize_srcu(ssp, true);
}
EXPORT_SYMBOL_GPL(synchronize_srcu);
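
/*
 * Callback function for srcu_barrier() use.
 */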
static void srcu_barrier_cb(struct rcu_head *rhp)
{
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(rhp, struct srcu_data, srcu_barrier_head);
	ssp = sdp->ssp;
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
}
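
/**
 * srcu_barrier - Wait until all in-flight call_srcu() callbacks complete.
 * @ssp: srcu_struct on which to wait for in-flight callbacks.
 */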
void srcu_barrier(struct srcu_struct *ssp)
{
	int cpu;
	struct srcu_data *sdp;
	unsigned long s = rcu_seq_snap(&ssp->srcu_barrier_seq);

	check_init_srcu_struct(ssp);
	mutex_lock(&ssp->srcu_barrier_mutex);
	if (rcu_seq_done(&ssp->srcu_barrier_seq, s)) {
		smp_mb(); /* Force ordering following return. */
		mutex_unlock(&ssp->srcu_barrier_mutex);
		return; /* Someone else did our work for us. */
	}
	rcu_seq_start(&ssp->srcu_barrier_seq);
	init_completion(&ssp->srcu_barrier_completion);

	/* Initial count prevents reaching zero until all CBs are posted. */
	atomic_set(&ssp->srcu_barrier_cpu_cnt, 1);

	/*
	 * Each pass through this loop enqueues a callback, but only
	 * on CPUs already having callbacks enqueued.  Note that if
	 * a CPU already has callbacks enqueued, it must have already
	 * registered the need for a future grace period, so all we
	 * need do is enqueue a callback that will use the same
	 * grace period as the last callback already in the queue.
	 */
	for_each_possible_cpu(cpu) {
		sdp = per_cpu_ptr(ssp->sda, cpu);
		spin_lock_irq_rcu_node(sdp);
		atomic_inc(&ssp->srcu_barrier_cpu_cnt);
		sdp->srcu_barrier_head.func = srcu_barrier_cb;
		debug_rcu_head_queue(&sdp->srcu_barrier_head);
		if (!rcu_segcblist_entrain(&sdp->srcu_cblist,
					   &sdp->srcu_barrier_head, 0)) {
			debug_rcu_head_unqueue(&sdp->srcu_barrier_head);
			atomic_dec(&ssp->srcu_barrier_cpu_cnt);
		}
		spin_unlock_irq_rcu_node(sdp);
	}

	/* Remove the initial count, at which point reaching zero can happen. */
	if (atomic_dec_and_test(&ssp->srcu_barrier_cpu_cnt))
		complete(&ssp->srcu_barrier_completion);
	wait_for_completion(&ssp->srcu_barrier_completion);

	rcu_seq_end(&ssp->srcu_barrier_seq);
	mutex_unlock(&ssp->srcu_barrier_mutex);
}
EXPORT_SYMBOL_GPL(srcu_barrier);
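
/**
 * srcu_batches_completed - return batches completed.
 * @ssp: srcu_struct on which to report batch completion.
 *
 * Report the number of batches, correlated with, but not necessarily
 * precisely the same as, the number of grace periods that have elapsed.
 */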
unsigned long srcu_batches_completed(struct srcu_struct *ssp)
{
	return ssp->srcu_idx;
}
EXPORT_SYMBOL_GPL(srcu_batches_completed);
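
/*
 * Core SRCU state machine.  Push state bits of ->srcu_gp_seq
 * to SRCU_STATE_SCAN2, and invoke srcu_gp_end() when scan2
 * completes the grace period.
 */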
static void srcu_advance_state(struct srcu_struct *ssp)
{
	int idx;

	mutex_lock(&ssp->srcu_gp_mutex);

	/*
	 * Because readers might be delayed for an extended period after
	 * fetching ->srcu_idx for their index, at any point in time there
	 * might well be readers using both idx=0 and idx=1.  We therefore
	 * need to wait for readers to clear out of both index values before
	 * invoking a callback.
	 *
	 * The load-acquire ensures that we see the accesses performed
	 * by the prior grace period.
	 */
	idx = rcu_seq_state(smp_load_acquire(&ssp->srcu_gp_seq));
	if (idx == SRCU_STATE_IDLE) {
		spin_lock_irq_rcu_node(ssp);
		if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
			WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq));
			spin_unlock_irq_rcu_node(ssp);
			mutex_unlock(&ssp->srcu_gp_mutex);
			return;
		}
		idx = rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq));
		if (idx == SRCU_STATE_IDLE)
			srcu_gp_start(ssp);
		spin_unlock_irq_rcu_node(ssp);
		if (idx != SRCU_STATE_IDLE) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* Someone else started the grace period. */
		}
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN1) {
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 1)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_flip(ssp);
		rcu_seq_set_state(&ssp->srcu_gp_seq, SRCU_STATE_SCAN2);
	}

	if (rcu_seq_state(READ_ONCE(ssp->srcu_gp_seq)) == SRCU_STATE_SCAN2) {

		/*
		 * SRCU read-side critical sections are normally short,
		 * so check at least twice in quick succession after a flip.
		 */
		idx = 1 ^ (ssp->srcu_idx & 1);
		if (!try_check_zero(ssp, idx, 2)) {
			mutex_unlock(&ssp->srcu_gp_mutex);
			return; /* readers present, retry later. */
		}
		srcu_gp_end(ssp);  /* Releases ->srcu_gp_mutex. */
	}
}
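
/*
 * Invoke a limited number of SRCU callbacks that have passed through
 * their grace period.  If there are more to do, SRCU will reschedule
 * the workqueue.  Note that needed memory barriers have been executed
 * in this task's context by srcu_readers_active_idx_check().
 */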
static void srcu_invoke_callbacks(struct work_struct *work)
{
	bool more;
	struct rcu_cblist ready_cbs;
	struct rcu_head *rhp;
	struct srcu_data *sdp;
	struct srcu_struct *ssp;

	sdp = container_of(work, struct srcu_data, work);

	ssp = sdp->ssp;
	rcu_cblist_init(&ready_cbs);
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_advance(&sdp->srcu_cblist,
			      rcu_seq_current(&ssp->srcu_gp_seq));
	if (sdp->srcu_cblist_invoking ||
	    !rcu_segcblist_ready_cbs(&sdp->srcu_cblist)) {
		spin_unlock_irq_rcu_node(sdp);
		return;  /* Someone else on the job or nothing to do. */
	}

	/* We are on the job!  Extract and invoke ready callbacks. */
	sdp->srcu_cblist_invoking = true;
	rcu_segcblist_extract_done_cbs(&sdp->srcu_cblist, &ready_cbs);
	spin_unlock_irq_rcu_node(sdp);
	rhp = rcu_cblist_dequeue(&ready_cbs);
	for (; rhp != NULL; rhp = rcu_cblist_dequeue(&ready_cbs)) {
		debug_rcu_head_unqueue(rhp);
		local_bh_disable();
		rhp->func(rhp);
		local_bh_enable();
	}

	/*
	 * Update counts, accelerate new callbacks, and if needed,
	 * schedule another round of callback invocation.
	 */
	spin_lock_irq_rcu_node(sdp);
	rcu_segcblist_insert_count(&sdp->srcu_cblist, &ready_cbs);
	(void)rcu_segcblist_accelerate(&sdp->srcu_cblist,
				       rcu_seq_snap(&ssp->srcu_gp_seq));
	sdp->srcu_cblist_invoking = false;
	more = rcu_segcblist_ready_cbs(&sdp->srcu_cblist);
	spin_unlock_irq_rcu_node(sdp);
	if (more)
		srcu_schedule_cbs_sdp(sdp, 0);
}
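
/*
 * Finished one round of SRCU grace period.  Start another if there are
 * more SRCU callbacks queued, otherwise put SRCU into not-running state.
 */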
static void srcu_reschedule(struct srcu_struct *ssp, unsigned long delay)
{
	bool pushgp = true;

	spin_lock_irq_rcu_node(ssp);
	if (ULONG_CMP_GE(ssp->srcu_gp_seq, ssp->srcu_gp_seq_needed)) {
		if (!WARN_ON_ONCE(rcu_seq_state(ssp->srcu_gp_seq))) {
			/* All requests fulfilled, time to go idle. */
			pushgp = false;
		}
	} else if (!rcu_seq_state(ssp->srcu_gp_seq)) {
		/* Outstanding request and no GP.  Start one. */
		srcu_gp_start(ssp);
	}
	spin_unlock_irq_rcu_node(ssp);

	if (pushgp)
		queue_delayed_work(rcu_gp_wq, &ssp->work, delay);
}
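
/*
 * This is the work-queue function that handles SRCU grace periods.
 */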
static void process_srcu(struct work_struct *work)
{
	struct srcu_struct *ssp;

	ssp = container_of(work, struct srcu_struct, work.work);

	srcu_advance_state(ssp);
	srcu_reschedule(ssp, srcu_get_delay(ssp));
}

void srcutorture_get_gp_data(enum rcutorture_type test_type,
			     struct srcu_struct *ssp, int *flags,
			     unsigned long *gp_seq)
{
	if (test_type != SRCU_FLAVOR)
		return;
	*flags = 0;
	*gp_seq = rcu_seq_current(&ssp->srcu_gp_seq);
}
EXPORT_SYMBOL_GPL(srcutorture_get_gp_data);

void srcu_torture_stats_print(struct srcu_struct *ssp, char *tt, char *tf)
{
	int cpu;
	int idx;
	unsigned long s0 = 0, s1 = 0;

	idx = ssp->srcu_idx & 0x1;
	pr_alert("%s%s Tree SRCU g%ld per-CPU(idx=%d):",
		 tt, tf, rcu_seq_current(&ssp->srcu_gp_seq), idx);
	for_each_possible_cpu(cpu) {
		unsigned long l0, l1;
		unsigned long u0, u1;
		long c0, c1;
		struct srcu_data *sdp;

		sdp = per_cpu_ptr(ssp->sda, cpu);
		u0 = sdp->srcu_unlock_count[!idx];
		u1 = sdp->srcu_unlock_count[idx];

		/*
		 * Make sure that a lock is always counted if the corresponding
		 * unlock is counted.
		 */
		smp_rmb();

		l0 = sdp->srcu_lock_count[!idx];
		l1 = sdp->srcu_lock_count[idx];

		c0 = l0 - u0;
		c1 = l1 - u1;
		pr_cont(" %d(%ld,%ld %1p)",
			cpu, c0, c1, rcu_segcblist_head(&sdp->srcu_cblist));
		s0 += c0;
		s1 += c1;
	}
	pr_cont(" T(%ld,%ld)\n", s0, s1);
}
EXPORT_SYMBOL_GPL(srcu_torture_stats_print);

static int __init srcu_bootup_announce(void)
{
	pr_info("Hierarchical SRCU implementation.\n");
	if (exp_holdoff != DEFAULT_SRCU_EXP_HOLDOFF)
		pr_info("\tNon-default auto-expedite holdoff of %lu ns.\n", exp_holdoff);
	return 0;
}
early_initcall(srcu_bootup_announce);

/*
 * Queue grace-period work for any srcu_struct structures whose grace
 * periods were requested during early boot, before workqueues were
 * available.
 */
void __init srcu_init(void)
{
	struct srcu_struct *ssp;

	srcu_init_done = true;
	while (!list_empty(&srcu_boot_list)) {
		ssp = list_first_entry(&srcu_boot_list, struct srcu_struct,
				       work.work.entry);
		check_init_srcu_struct(ssp);
		list_del_init(&ssp->work.work.entry);
		queue_work(rcu_gp_wq, &ssp->work.work);
	}
}