1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/kthread.h>
26#include <linux/module.h>
27#include <linux/debugfs.h>
28#include <linux/seq_file.h>
29
30
/* Global control block for one flavor of Tiny RCU (sched or bh). */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last non-done CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
	RCU_TRACE(unsigned long gp_start);	/* Start time for stalls. */
	RCU_TRACE(unsigned long ticks_this_gp);	/* Statistic for stalls. */
	RCU_TRACE(unsigned long jiffies_stall);	/* Jiffies at next stall. */
	RCU_TRACE(char *name);		/* Name of RCU type, for messages. */
};
41
42
/* Control block for rcu-sched; both tails start at the (empty) list head. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail = &rcu_sched_ctrlblk.rcucblist,
	.curtail = &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};
48
/* Control block for rcu-bh; both tails start at the (empty) list head. */
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail = &rcu_bh_ctrlblk.rcucblist,
	.curtail = &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};
54
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/*
 * Set once the scheduler is running (see rcu_scheduler_starting() below);
 * synchronize_rcu() uses it to become a no-op during early boot.
 */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif
59
60#ifdef CONFIG_RCU_TRACE
61
62static void check_cpu_stall(struct rcu_ctrlblk *rcp)
63{
64 unsigned long j;
65 unsigned long js;
66
67 if (rcu_cpu_stall_suppress)
68 return;
69 rcp->ticks_this_gp++;
70 j = jiffies;
71 js = rcp->jiffies_stall;
72 if (*rcp->curtail && ULONG_CMP_GE(j, js)) {
73 pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
74 rcp->name, rcp->ticks_this_gp, rcu_dynticks_nesting,
75 jiffies - rcp->gp_start, rcp->qlen);
76 dump_stack();
77 }
78 if (*rcp->curtail && ULONG_CMP_GE(j, js))
79 rcp->jiffies_stall = jiffies +
80 3 * rcu_jiffies_till_stall_check() + 3;
81 else if (ULONG_CMP_GE(j, js))
82 rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
83}
84
85static void check_cpu_stall_preempt(void);
86
87#endif
88
/* Reset the per-grace-period stall bookkeeping; no-op unless RCU_TRACE. */
static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
{
#ifdef CONFIG_RCU_TRACE
	rcp->ticks_this_gp = 0;
	rcp->gp_start = jiffies;
	rcp->jiffies_stall = jiffies + rcu_jiffies_till_stall_check();
#endif
}
97
/* Run the stall check for every RCU flavor; no-ops unless RCU_TRACE. */
static void check_cpu_stalls(void)
{
	RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
	RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
	RCU_TRACE(check_cpu_stall_preempt());
}
104
105#ifdef CONFIG_TINY_PREEMPT_RCU
106
107#include <linux/delay.h>
108
109
/* Global control block for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible RCU */
				/*  read-side critical section while an */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last callback that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such task. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there */
				/*  is no current expedited grace period, */
				/*  then there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority-boosted, or NULL if no priority */
				/*  boosting is needed.  If there is no */
				/*  current or expedited grace period, there */
				/*  can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/*  If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GP. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GP. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time. */
	unsigned long n_balk_nos;
				/* Refused to boost: not sure why, though. */
				/*  This can happen due to race conditions. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};
174
/* Control block for rcu-preempt; all tails start at the (empty) list head. */
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};
182
183static int rcu_preempted_readers_exp(void);
184static void rcu_report_exp_done(void);
185
186
187
188
189static int rcu_cpu_blocking_cur_gp(void)
190{
191 return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
192}
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208static int rcu_preempt_running_reader(void)
209{
210 return current->rcu_read_lock_nesting;
211}
212
213
214
215
216
217static int rcu_preempt_blocked_readers_any(void)
218{
219 return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
220}
221
222
223
224
225
226static int rcu_preempt_blocked_readers_cgp(void)
227{
228 return rcu_preempt_ctrlblk.gp_tasks != NULL;
229}
230
231
232
233
234static int rcu_preempt_needs_another_gp(void)
235{
236 return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
237}
238
239
240
241
242
243static int rcu_preempt_gp_in_progress(void)
244{
245 return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
246}
247
248
249
250
251
252static struct list_head *rcu_next_node_entry(struct task_struct *t)
253{
254 struct list_head *np;
255
256 np = t->rcu_node_entry.next;
257 if (np == &rcu_preempt_ctrlblk.blkd_tasks)
258 np = NULL;
259 return np;
260}
261
262#ifdef CONFIG_RCU_TRACE
263
264#ifdef CONFIG_RCU_BOOST
265static void rcu_initiate_boost_trace(void);
266#endif
267
268
269
270
/*
 * Dump additional statistics for TINY_PREEMPT_RCU into the debugfs
 * "rcudata" seq_file: queue length, GP counters, and which task pointers
 * (blkd/gp/exp, plus boost counters when RCU_BOOST) are populated.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   "             ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   "             balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif
}
300
301#endif
302
303#ifdef CONFIG_RCU_BOOST
304
305#include "rtmutex_common.h"
306
307#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
308
309
310static struct task_struct *rcu_kthread_task;
311static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
312static unsigned long have_rcu_kthread_work;
313
314
315
316
317
/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks
 * or ->exp_tasks.  Return nonzero if more boosting remains to be done.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections on their own
	 * if we were preempted just before disabling irqs.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		local_irq_restore(flags);
		return 0;
	}

	/* Preferentially boost tasks blocking expedited grace periods. */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	local_irq_restore(flags);
	rt_mutex_lock(&mtx);	/* Boosts t until it releases the mutex. */
	rt_mutex_unlock(&mtx);

	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}
375
376
377
378
379
380
381
382
383
384
385
/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the rcu_kthread_task to
 * start boosting them.
 *
 * Returns 0 if nothing blocks the current (or an expedited) grace period,
 * otherwise 1.  The return value is independent of whether boosting was
 * actually initiated.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();	/* Wake kthread to do the boosting. */
	} else {
		RCU_TRACE(rcu_initiate_boost_trace());	/* Record why not. */
	}
	return 1;
}
406
407#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
408
409
410
411
/* Schedule the earliest time at which boosting may begin for this GP. */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}
416
417#else
418
419
420
421
422
423
/*
 * With CONFIG_RCU_BOOST disabled there is no boosting to do; simply
 * report whether blocked readers are holding up the current grace period.
 */
static int rcu_initiate_boost(void)
{
	int blocked = rcu_preempt_blocked_readers_cgp();

	return blocked;
}
428
429
430
431
/* No boosting without CONFIG_RCU_BOOST: nothing to schedule at GP start. */
static void rcu_preempt_boost_start_gp(void)
{
}
435
436#endif
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
/*
 * Record a preemptible-RCU quiescent state for this CPU: the task
 * currently running is in a quiescent state (any number of tasks may
 * still be blocked within read-side critical sections).  Callers must
 * have irqs disabled to protect the ->rcu_read_unlock_special update.
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do. */
	if (!rcu_preempt_gp_in_progress())
		return;

	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave -- the GP cannot end yet.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks: current GP is complete. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}
488
489
490
491
/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);
		reset_cpu_stall_ticks(&rcu_preempt_ctrlblk.rcb);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the blkd_tasks list.
 * Either way, record a quiescent state for this CPU.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * If this CPU has already checked in, then this task
		 * will hold up the next grace period rather than the
		 * current grace period.  Queue the task accordingly.
		 * If the task is queued for the current grace period
		 * (i.e., this CPU has not yet passed through a quiescent
		 * state for the current grace period), then as long as
		 * that task remains queued, the current grace period
		 * cannot end.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {
		/*
		 * Complete exit from RCU read-side critical section on
		 * behalf of preempted instance of __rcu_read_unlock().
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
577
578
579
580
581
582
/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the RCU
 * read-side critical section.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical
	 * section, report that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif

		/*
		 * If this was the last task on the current list, and if
		 * we aren't waiting on the CPU, report the quiescent state
		 * and start a new grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task on the expedited lists,
		 * then we need to wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if we were boosted. */
	if (t->rcu_boost_mutex != NULL) {
		rbmp = t->rcu_boost_mutex;
		t->rcu_boost_mutex = NULL;
		rt_mutex_unlock(rbmp);
	}
#endif
	local_irq_restore(flags);
}
665
666
667
668
669
670
671
672
/*
 * Check for a quiescent state from the current CPU, including blocked
 * tasks.  Called from the scheduling-clock interrupt; caller must have
 * hard irqs disabled.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	/* QS if no GP in progress blocked by this CPU's running reader. */
	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	/* Kick off invocation of any callbacks whose GP has completed. */
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	/* Ask a running reader blocking the GP to report a QS on unlock. */
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
689
690
691
692
693
694
695
696
697
698static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
699{
700 if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
701 rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
702}
703
704
705
706
/* Process done callbacks for preemptible RCU via the common helper. */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
711
712
713
714
/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* If needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
731
732
733
734
735
736
737
738
739
740
/*
 * synchronize_rcu - wait until a grace period has elapsed.
 *
 * Control will return to the caller some time after a full grace
 * period has elapsed, in other words after all currently executing
 * RCU read-side critical sections have completed.
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	/* During early boot there can be no other CPUs or readers. */
	if (!rcu_scheduler_active)
		return;
#endif

	WARN_ON_ONCE(rcu_preempt_running_reader());
	/* If no reader is blocked, the grace period is already complete. */
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	if (rcu_expedited)
		synchronize_rcu_expedited();
	else
		rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
764
/* Wait queue, completion counter, and mutex for expedited grace periods. */
static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
768
769
770
771
772
773
774
775static int rcu_preempted_readers_exp(void)
776{
777 return rcu_preempt_ctrlblk.exp_tasks != NULL;
778}
779
780
781
782
783
784
/*
 * Wake up the waiter in synchronize_rcu_expedited() now that the last
 * blocked reader has exited its critical section.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}
789
790
791
792
793
794
795
796
797
798
799
/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to snapshot the tasks currently blocked in RCU read-side critical
 * sections (they must already be on ->blkd_tasks) and wait only for them
 * to drain, rather than waiting for a full normal grace period.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire the lock so that there is only one expedited grace
	 * period in flight.  If someone completes an expedited grace
	 * period for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.  Snapshot the current head of the ->blkd_tasks list;
	 * NULL if the list is empty.
	 */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for the snapshotted readers to drain. */
	if (!rcu_preempted_readers_exp()) {
		local_irq_restore(flags);
	} else {
		rcu_initiate_boost();	/* Boost blockers if configured. */
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
851
852
853
854
855int rcu_preempt_needs_cpu(void)
856{
857 return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
858}
859
860#else
861
862#ifdef CONFIG_RCU_TRACE
863
864
865
866
867
/* Without TINY_PREEMPT_RCU there are no preempt statistics to show. */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}
871
872#endif
873
874
875
876
877
/* Without TINY_PREEMPT_RCU, preempt-flavor callback checking is a no-op. */
static void rcu_preempt_check_callbacks(void)
{
}
881
882
883
884
885
/* Without TINY_PREEMPT_RCU, there is no ->nexttail to re-anchor. */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}
889
890
891
892
893
/* Without TINY_PREEMPT_RCU, there are no preempt callbacks to process. */
static void rcu_preempt_process_callbacks(void)
{
}
897
898#endif
899
900#ifdef CONFIG_RCU_BOOST
901
902
903
904
905
/*
 * Wake up the callback-and-boosting kthread.  The work flag is set
 * unconditionally so a wakeup that races with kthread creation is not
 * lost: rcu_kthread() rechecks have_rcu_kthread_work before sleeping.
 */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	if (rcu_kthread_task != NULL)
		wake_up(&rcu_kthread_wq);
}
912
913#ifdef CONFIG_RCU_TRACE
914
915
916
917
918
919static bool rcu_is_callbacks_kthread(void)
920{
921 return rcu_kthread_task == current;
922}
923
924#endif
925
926
927
928
929
930
931
932
/*
 * This kthread invokes RCU callbacks whose grace periods have elapsed,
 * and also does RCU priority boosting.  It takes the place of the
 * RCU_SOFTIRQ used when boosting is disabled.  It runs forever; the
 * trailing return only placates the compiler.
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();	/* nonzero: boosting remains */
		/* Consume the work flag atomically w.r.t. interrupts. */
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0;  /* NOTREACHED */
}
954
955
956
957
958static int __init rcu_spawn_kthreads(void)
959{
960 struct sched_param sp;
961
962 rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
963 sp.sched_priority = RCU_BOOST_PRIO;
964 sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
965 return 0;
966}
967early_initcall(rcu_spawn_kthreads);
968
969#else
970
971
972static int rcu_scheduler_fully_active __read_mostly;
973
974
975
976
977void invoke_rcu_callbacks(void)
978{
979 if (rcu_scheduler_fully_active)
980 raise_softirq(RCU_SOFTIRQ);
981}
982
983#ifdef CONFIG_RCU_TRACE
984
985
986
987
/* There is no callback kthread without RCU_BOOST, so never "in" it. */
static bool rcu_is_callbacks_kthread(void)
{
	return false;
}
992
993#endif
994
/*
 * Mark the scheduler as fully active, register the RCU softirq handler,
 * and re-raise the softirq to catch any invoke_rcu_callbacks() calls
 * that arrived before the flag was set.
 */
static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
	return 0;
}
early_initcall(rcu_scheduler_really_started);
1003
1004#endif
1005
1006#ifdef CONFIG_DEBUG_LOCK_ALLOC
1007#include <linux/kernel_stat.h>
1008
1009
1010
1011
1012
/*
 * Called during boot, before the scheduler performs its first context
 * switch, to let synchronize_rcu()'s early-boot fastpath switch off.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);	/* Must still be single-flow. */
	rcu_scheduler_active = 1;
}
1018
1019#endif
1020
1021#ifdef CONFIG_RCU_TRACE
1022
1023#ifdef CONFIG_RCU_BOOST
1024
/* Record, for tracing, why rcu_initiate_boost() declined to boost. */
static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;	/* Nobody blocked. */
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;	/* Nothing blocks GP. */
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;	/* Already boosting. */
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;		/* Not yet time. */
	else
		rcu_preempt_ctrlblk.n_balk_nos++;	/* Unclear; likely a race. */
}
1039
1040#endif
1041
/*
 * Subtract @n from @rcp's callback-queue length statistic, with irqs
 * disabled so the read-modify-write does not race with call_rcu()'s
 * qlen increment from interrupt context.
 */
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	local_irq_save(flags);
	rcp->qlen -= n;
	local_irq_restore(flags);
}
1050
1051
1052
1053
/* seq_file show routine for debugfs "rcudata": dump all flavors' stats. */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);	/* No-op unless TINY_PREEMPT_RCU. */
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}
1061
/* debugfs open routine: hook show_tiny_stats() into the seq_file API. */
static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}
1066
/* File operations for the read-only debugfs "rcudata" file. */
static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1074
1075static struct dentry *rcudir;
1076
1077static int __init rcutiny_trace_init(void)
1078{
1079 struct dentry *retval;
1080
1081 rcudir = debugfs_create_dir("rcu", NULL);
1082 if (!rcudir)
1083 goto free_out;
1084 retval = debugfs_create_file("rcudata", 0444, rcudir,
1085 NULL, &show_tiny_stats_fops);
1086 if (!retval)
1087 goto free_out;
1088 return 0;
1089free_out:
1090 debugfs_remove_recursive(rcudir);
1091 return 1;
1092}
1093
/* Module exit: tear down the debugfs "rcu" directory and its contents. */
static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}
1098
/* Module hookup for the RCU_TRACE debugfs interface. */
module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");
1105
/* Run the stall check on the preempt flavor, if it is configured in. */
static void check_cpu_stall_preempt(void)
{
#ifdef CONFIG_TINY_PREEMPT_RCU
	check_cpu_stall(&rcu_preempt_ctrlblk.rcb);
#endif
}
1112
1113#endif
1114