1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25#include <linux/kthread.h>
26#include <linux/module.h>
27#include <linux/debugfs.h>
28#include <linux/seq_file.h>
29
30
/* Control block shared by all Tiny-RCU flavors (sched, bh, preempt). */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (->next chain). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" callback. */
	struct rcu_head **curtail;	/* ->next pointer of last callback waiting */
					/*  for the current grace period. */
	RCU_TRACE(long qlen);		/* Number of pending callbacks (tracing only). */
	RCU_TRACE(char *name);		/* Flavor name for statistics output. */
};
38
39
/* Control block for rcu_sched, with an initially empty callback list. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail = &rcu_sched_ctrlblk.rcucblist,
	.curtail = &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};
45
/* Control block for rcu_bh, with an initially empty callback list. */
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail = &rcu_bh_ctrlblk.rcucblist,
	.curtail = &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};
51
#ifdef CONFIG_DEBUG_LOCK_ALLOC
/* Nonzero once the scheduler is running; set by rcu_scheduler_starting(). */
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif
56
57#ifdef CONFIG_TINY_PREEMPT_RCU
58
59#include <linux/delay.h>
60
61
/*
 * Preemptible-RCU control block: embeds the common control block and adds
 * the state needed to track readers that were preempted while inside an
 * RCU read-side critical section.
 */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;		/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
					/* Tasks blocked in a preempted RCU */
					/*  read-side critical section while an */
					/*  RCU grace period is in progress must */
					/*  wait for that GP to complete; new */
					/*  callbacks are therefore appended at */
					/*  *nexttail by call_rcu(), and only */
					/*  advanced to ->curtail when the GP */
					/*  ends (see rcu_preempt_cpu_qs()). */
	struct list_head blkd_tasks;
					/* Tasks preempted within an RCU */
					/*  read-side critical section; added in */
					/*  rcu_preempt_note_context_switch(). */
	struct list_head *gp_tasks;
					/* Pointer to the first task blocking the */
					/*  current grace period, or NULL if there */
					/*  is no such task. */
	struct list_head *exp_tasks;
					/* Pointer to the first task blocking the */
					/*  current expedited grace period, or NULL */
					/*  if there is no such task (set in */
					/*  synchronize_rcu_expedited()). */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
					/* Pointer to first task that needs to be */
					/*  priority-boosted, or NULL if no boosting */
					/*  is needed; boosting starts at the */
					/*  ->gp_tasks position (rcu_initiate_boost()). */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;			/* Current grace period. */
	u8 gpcpu;			/* Last grace period blocked by the CPU; */
					/*  equals ->gpnum once the CPU passes a QS. */
	u8 completed;			/* Last grace period completed. */
					/*  If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time;	/* When (jiffies) to start boosting. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;	/* Number of GPs started. */
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
					/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
					/* Number of boosts on behalf of an */
					/*  expedited grace period. */
	unsigned long n_normal_boosts;
					/* Number of boosts on behalf of a */
					/*  normal grace period. */
	unsigned long n_balk_blkd_tasks;
					/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
					/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
					/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
					/* Refused to boost: not yet time */
					/*  (jiffies < ->boost_time). */
	unsigned long n_balk_nos;
					/* Refused to boost: none of the above */
					/*  reasons applied. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};
126
/* Preemptible-RCU control block, with all tail pointers at the empty list. */
static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
	RCU_TRACE(.rcb.name = "rcu_preempt")
};
134
135static int rcu_preempted_readers_exp(void);
136static void rcu_report_exp_done(void);
137
138
139
140
141static int rcu_cpu_blocking_cur_gp(void)
142{
143 return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
144}
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160static int rcu_preempt_running_reader(void)
161{
162 return current->rcu_read_lock_nesting;
163}
164
165
166
167
168
169static int rcu_preempt_blocked_readers_any(void)
170{
171 return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
172}
173
174
175
176
177
178static int rcu_preempt_blocked_readers_cgp(void)
179{
180 return rcu_preempt_ctrlblk.gp_tasks != NULL;
181}
182
183
184
185
186static int rcu_preempt_needs_another_gp(void)
187{
188 return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
189}
190
191
192
193
194
195static int rcu_preempt_gp_in_progress(void)
196{
197 return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
198}
199
200
201
202
203
204static struct list_head *rcu_next_node_entry(struct task_struct *t)
205{
206 struct list_head *np;
207
208 np = t->rcu_node_entry.next;
209 if (np == &rcu_preempt_ctrlblk.blkd_tasks)
210 np = NULL;
211 return np;
212}
213
214#ifdef CONFIG_RCU_TRACE
215
216#ifdef CONFIG_RCU_BOOST
217static void rcu_initiate_boost_trace(void);
218#endif
219
220
221
222
/*
 * Dump preemptible-RCU state to the supplied seq_file: queue length,
 * GP counters, blocked-task indicators, and (if boosting is configured)
 * boost statistics and balk counters.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   "             ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   "             balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}
252
253#endif
254
255#ifdef CONFIG_RCU_BOOST
256
257#include "rtmutex_common.h"
258
259#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
260
261
262static struct task_struct *rcu_kthread_task;
263static DECLARE_WAIT_QUEUE_HEAD(rcu_kthread_wq);
264static unsigned long have_rcu_kthread_work;
265
266
267
268
269
/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks
 * or ->exp_tasks, preferring the expedited-GP blocker.  Boosting works by
 * proxy-locking an on-stack rt_mutex on behalf of the to-be-boosted reader
 * and then blocking on it, so the reader inherits this task's priority
 * until it releases the mutex in rcu_read_unlock_special().  Returns
 * nonzero if there might be more tasks needing to be boosted.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	/* Quick unsynchronized check: anything to boost at all? */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	raw_local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: the candidate reader may have
	 * exited its critical section in the meantime, clearing the
	 * pointers we sampled above.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		raw_local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking the expedited grace
	 * period; failing that, boost tasks blocking the normal one.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * Set up the on-stack rt_mutex as if the to-be-boosted task
	 * already held it, record it in the task so that
	 * rcu_read_unlock_special() can release it, then block on it.
	 * Priority inheritance via rt_mutex_lock() does the boosting.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);	/* Side effect: boosts task t's priority. */
	rt_mutex_unlock(&mtx);

	/* More work remains if either pointer is still non-NULL. */
	return ACCESS_ONCE(rcu_preempt_ctrlblk.boost_tasks) != NULL ||
	       ACCESS_ONCE(rcu_preempt_ctrlblk.exp_tasks) != NULL;
}
327
328
329
330
331
332
333
334
335
336
337
/*
 * Check whether it is time to start boosting readers that are blocking
 * the current (normal or expedited) grace period and, if so, arrange for
 * the boosting to happen (via invoke_rcu_callbacks() waking the kthread).
 * Returns 1 if blocked readers still prevent the grace period from
 * ending (so the caller must not clean up the GP yet), 0 otherwise.
 */
static int rcu_initiate_boost(void)
{
	/* Nothing blocks either kind of grace period: no boosting needed. */
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	/*
	 * Boost immediately for an expedited GP; for a normal GP, boost
	 * only once ->boost_time has passed and boosting is not already
	 * underway (->boost_tasks still NULL).
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_callbacks();	/* Wakes the boost/callback kthread. */
	} else {
		RCU_TRACE(rcu_initiate_boost_trace());	/* Record why we balked. */
	}
	return 1;
}
358
359#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
360
361
362
363
364static void rcu_preempt_boost_start_gp(void)
365{
366 rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
367}
368
369#else
370
371
372
373
374
375
/*
 * Without CONFIG_RCU_BOOST there is no boosting to start; simply report
 * whether blocked readers are still holding up the current grace period.
 */
static int rcu_initiate_boost(void)
{
	int readers_blocking = rcu_preempt_blocked_readers_cgp();

	return readers_blocking;
}
380
381
382
383
/* Without CONFIG_RCU_BOOST, grace-period start needs no boost setup. */
static void rcu_preempt_boost_start_gp(void)
{
}
387
388#endif
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
/*
 * Record a preemptible-RCU quiescent state for this CPU.  Must be called
 * with irqs disabled.  If no readers remain blocking the current grace
 * period, end it: advance ->completed and the callback tail pointers, and
 * kick off callback invocation if any callbacks became "done".
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both the CPU's QS and clear the task's need-QS flag. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no grace period in progress, we are done. */
	if (!rcu_preempt_gp_in_progress())
		return;

	/*
	 * If boosting is needed (readers still block the GP), the grace
	 * period cannot end yet; rcu_initiate_boost() returns nonzero
	 * in that case.
	 */
	if (rcu_initiate_boost())
		return;

	/* Grace period has ended: advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If no readers are blocked at all, the next GP completes too. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are "done" callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_callbacks();
}
440
441
442
443
/*
 * Start a new grace period if one is needed and none is in progress.
 * Caller must have irqs disabled (callers in this file do).
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of the grace period. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any already-blocked readers block the new grace period. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set the deadline for RCU priority boosting (if configured). */
		rcu_preempt_boost_start_gp();

		/* If not inside a read-side critical section, note a QS now. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
/*
 * Handle a context switch on behalf of preemptible RCU.  If the outgoing
 * task is inside an RCU read-side critical section and has not already
 * been queued, enqueue it on ->blkd_tasks so the grace period waits for
 * it.  A context switch is unconditionally a quiescent state for the CPU,
 * so also record one before returning.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() > 0 &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Queue the blocked reader.  If it blocks the current
		 * grace period (the CPU has not yet passed a QS for it),
		 * place it at the head and point ->gp_tasks at it, so
		 * the GP waits for this task but not for readers queued
		 * earlier that do not block it.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	} else if (rcu_preempt_running_reader() < 0 &&
		   t->rcu_read_unlock_special) {
		/*
		 * Negative nesting with special-handling flags set:
		 * complete the exit from the outermost critical section.
		 */
		rcu_read_unlock_special(t);
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, a context switch is a quiescent state.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}
528
529
530
531
532
533
/*
 * Handle special conditions at the end of an RCU read-side critical
 * section: record a needed quiescent state, dequeue the task from
 * ->blkd_tasks if it had blocked, possibly end the grace period and/or
 * the expedited grace period, and release any boost mutex.
 */
void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
#ifdef CONFIG_RCU_BOOST
	struct rt_mutex *rbmp = NULL;
#endif /* #ifdef CONFIG_RCU_BOOST */
	int special;

	/* NMI handlers cannot block and cannot safely manipulate state. */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical
	 * section, report the fact that it has exited.  Snapshot the
	 * flags first, since rcu_preempt_cpu_qs() clears NEED_QS.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ and softirq handlers cannot have blocked: done. */
	if (in_irq() || in_serving_softirq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if this task blocked during its critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list, first
		 * snapshotting whether it was the last task blocking the
		 * normal and expedited grace periods, and fixing up any
		 * list pointers (->gp_tasks/->exp_tasks/->boost_tasks)
		 * that referenced it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task blocking the current grace
		 * period, note the quiescent state and start any needed
		 * new grace period.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task blocking the expedited grace
		 * period, wake up the waiter in synchronize_rcu_expedited().
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if this task was boosted (see rcu_boost()). */
	if (t->rcu_boost_mutex != NULL) {
		rbmp = t->rcu_boost_mutex;
		t->rcu_boost_mutex = NULL;
		rt_mutex_unlock(rbmp);
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}
616
617
618
619
620
621
622
623
/*
 * Scheduler-tick-time check of RCU state: note a quiescent state if
 * possible, invoke any "done" callbacks, and if this task's read-side
 * critical section is blocking the current grace period, ask it to
 * report a quiescent state when it exits (NEED_QS).
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_callbacks();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader() > 0)
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
640
641
642
643
644
645
646
647
648
649static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
650{
651 if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
652 rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
653}
654
655
656
657
/* Process the preemptible-RCU flavor's "done" callbacks. */
static void rcu_preempt_process_callbacks(void)
{
	__rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
662
663
664
665
/*
 * Queue a callback to be invoked after a preemptible-RCU grace period.
 * The callback is appended at *->nexttail (the next-GP segment) and a
 * grace period is started if one is needed and none is running.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* checks to see if GP needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
682
683
684
685
686
687
688
689
690
691
/*
 * Wait for a preemptible-RCU grace period to elapse.  Illegal from within
 * an RCU read-side critical section (checked by lockdep and WARN below).
 * If no readers are currently blocked, return immediately; otherwise fall
 * back on rcu_barrier(), which waits via call_rcu().
 */
void synchronize_rcu(void)
{
	rcu_lockdep_assert(!lock_is_held(&rcu_bh_lock_map) &&
			   !lock_is_held(&rcu_lock_map) &&
			   !lock_is_held(&rcu_sched_lock_map),
			   "Illegal synchronize_rcu() in RCU read-side critical section");

#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;	/* Too early in boot for grace periods to matter. */
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
712
713static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
714static unsigned long sync_rcu_preempt_exp_count;
715static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
716
717
718
719
720
721
722
723static int rcu_preempted_readers_exp(void)
724{
725 return rcu_preempt_ctrlblk.exp_tasks != NULL;
726}
727
728
729
730
731
732
/*
 * Report the end of an expedited grace period by waking up the waiter
 * sleeping in synchronize_rcu_expedited().
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}
737
738
739
740
741
742
743
744
745
746
747
/*
 * Wait for an expedited grace period: all readers that were already
 * inside an RCU read-side critical section when we were called.  The
 * snapshot/compare on sync_rcu_preempt_exp_count lets a caller that
 * slept on the mutex piggyback on a grace period completed by another
 * caller in the meantime.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Snapshot the counter before acquiring the mutex.  If the count
	 * has advanced past our snapshot once we hold the mutex, a full
	 * expedited GP completed while we waited, so we can return.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All readers queued on ->blkd_tasks at this instant block this
	 * expedited grace period; point ->exp_tasks at the first of them
	 * (or NULL if the list is empty).
	 */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for tail of ->blkd_tasks list to drain, boosting if needed. */
	if (!rcu_preempted_readers_exp()) {
		local_irq_restore(flags);
	} else {
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit, advertising the completed grace period. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
799
800
801
802
803int rcu_preempt_needs_cpu(void)
804{
805 return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
806}
807
808#else
809
810#ifdef CONFIG_RCU_TRACE
811
812
813
814
815
/* Without CONFIG_TINY_PREEMPT_RCU there is no preempt state to show. */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}
819
820#endif
821
822
823
824
825
/* Without CONFIG_TINY_PREEMPT_RCU, no preempt-RCU callbacks to check. */
static void rcu_preempt_check_callbacks(void)
{
}
829
830
831
832
833
/* Without CONFIG_TINY_PREEMPT_RCU, no preempt-RCU callbacks to remove. */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}
837
838
839
840
841
/* Without CONFIG_TINY_PREEMPT_RCU, no preempt-RCU callbacks to process. */
static void rcu_preempt_process_callbacks(void)
{
}
845
846#endif
847
848#ifdef CONFIG_RCU_BOOST
849
850
851
852
853
/*
 * Wake up the callback/boost kthread to process callbacks (and, under
 * CONFIG_RCU_BOOST, to perform any needed priority boosting).  The
 * kthread may not yet exist during early boot, hence the NULL check;
 * have_rcu_kthread_work records the request either way.
 */
static void invoke_rcu_callbacks(void)
{
	have_rcu_kthread_work = 1;
	if (rcu_kthread_task != NULL)
		wake_up(&rcu_kthread_wq);
}
860
861#ifdef CONFIG_RCU_TRACE
862
863
864
865
866
867static bool rcu_is_callbacks_kthread(void)
868{
869 return rcu_kthread_task == current;
870}
871
872#endif
873
874
875
876
877
878
879
880
/*
 * Per-CPU (well, the only CPU's) kthread that invokes RCU callbacks and
 * performs RCU priority boosting.  Sleeps until invoke_rcu_callbacks()
 * signals work via have_rcu_kthread_work; rcu_boost()'s return value is
 * stashed back into have_rcu_kthread_work so remaining boost work causes
 * another pass.  Never returns (the loop is infinite).
 */
static int rcu_kthread(void *arg)
{
	unsigned long work;
	unsigned long morework;
	unsigned long flags;

	for (;;) {
		wait_event_interruptible(rcu_kthread_wq,
					 have_rcu_kthread_work != 0);
		morework = rcu_boost();
		/* Consume the work flag atomically w.r.t. local interrupts. */
		local_irq_save(flags);
		work = have_rcu_kthread_work;
		have_rcu_kthread_work = morework;
		local_irq_restore(flags);
		if (work)
			rcu_process_callbacks(NULL);
		schedule_timeout_interruptible(1); /* Leave CPU for others. */
	}

	return 0;  /* Not reached, but needed to shut gcc up. */
}
902
903
904
905
/*
 * Spawn the kthread that invokes RCU callbacks, running it at SCHED_FIFO
 * priority RCU_BOOST_PRIO so it can perform priority boosting.
 *
 * NOTE(review): kthread_run()'s result is not checked for IS_ERR before
 * being passed to sched_setscheduler_nocheck() — presumably acceptable
 * this early in boot, but verify.
 */
static int __init rcu_spawn_kthreads(void)
{
	struct sched_param sp;

	rcu_kthread_task = kthread_run(rcu_kthread, NULL, "rcu_kthread");
	sp.sched_priority = RCU_BOOST_PRIO;
	sched_setscheduler_nocheck(rcu_kthread_task, SCHED_FIFO, &sp);
	return 0;
}
early_initcall(rcu_spawn_kthreads);
915early_initcall(rcu_spawn_kthreads);
916
917#else
918
919
920static int rcu_scheduler_fully_active __read_mostly;
921
922
923
924
925void invoke_rcu_callbacks(void)
926{
927 if (rcu_scheduler_fully_active)
928 raise_softirq(RCU_SOFTIRQ);
929}
930
931#ifdef CONFIG_RCU_TRACE
932
933
934
935
/* There is no callback kthread without CONFIG_RCU_BOOST, so answer "no". */
static bool rcu_is_callbacks_kthread(void)
{
	return false;
}
940
941#endif
942
/*
 * Once the scheduler is fully up, enable softirq-based callback
 * processing and raise RCU_SOFTIRQ to catch any work queued earlier.
 */
static int __init rcu_scheduler_really_started(void)
{
	rcu_scheduler_fully_active = 1;
	open_softirq(RCU_SOFTIRQ, rcu_process_callbacks);
	raise_softirq(RCU_SOFTIRQ);  /* Invoke any callbacks from early boot. */
	return 0;
}
early_initcall(rcu_scheduler_really_started);
951
952#endif
953
954#ifdef CONFIG_DEBUG_LOCK_ALLOC
955#include <linux/kernel_stat.h>
956
957
958
959
960
/*
 * Mark RCU as aware that the scheduler is active; must run before any
 * context switch has happened (hence the WARN_ON).
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}
966
967#endif
968
969#ifdef CONFIG_RCU_TRACE
970
971#ifdef CONFIG_RCU_BOOST
972
/*
 * Classify why rcu_initiate_boost() balked at boosting, bumping the
 * matching statistics counter.  The checks mirror that function's
 * conditions, most specific first.
 */
static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;
	else
		rcu_preempt_ctrlblk.n_balk_nos++;
}
987
988#endif
989
990static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
991{
992 unsigned long flags;
993
994 raw_local_irq_save(flags);
995 rcp->qlen -= n;
996 raw_local_irq_restore(flags);
997}
998
999
1000
1001
/* seq_file show callback: dump queue lengths for all Tiny-RCU flavors. */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}
1009
/* debugfs open callback: bind show_tiny_stats() via single_open(). */
static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}
1014
/* File operations for the "rcudata" debugfs file (read-only seq_file). */
static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};
1022
1023static struct dentry *rcudir;
1024
/*
 * Create the "rcu" debugfs directory and its "rcudata" statistics file.
 * On any failure, tear down whatever was created (debugfs_remove_recursive
 * tolerates a NULL dentry) and return nonzero.
 */
static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}
1041
/* Module exit: remove the entire "rcu" debugfs directory tree. */
static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}
1046
1047module_init(rcutiny_trace_init);
1048module_exit(rcutiny_trace_cleanup);
1049
1050MODULE_AUTHOR("Paul E. McKenney");
1051MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
1052MODULE_LICENSE("GPL");
1053
1054#endif
1055