/*
 * Read-Copy Update mechanism for mutual exclusion, the Bloatwatch edition
 * Internal non-public definitions that provide either classic
 * or preemptible semantics.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
 *
 * Copyright (c) 2010 Linaro
 *
 * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
 */
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else /* #ifdef CONFIG_RCU_TRACE */
#define RCU_TRACE(stmt)
#endif /* #else #ifdef CONFIG_RCU_TRACE */
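
/*
 * Illustrative use (not additional code): a statement such as
 * RCU_TRACE(rcp->qlen++) updates the tracing counter when
 * CONFIG_RCU_TRACE is set and compiles away to nothing otherwise.
 */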

/* Global control variables for rcupdate callback mechanism. */
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
};

/* Definitions for the rcu_sched and rcu_bh callback control blocks. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
};
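
/*
 * Note that an empty callback list is represented by both tail pointers
 * referencing ->rcucblist itself, so enqueuing at *->curtail and advancing
 * ->donetail never need to special-case the empty list.
 */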

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control variables for preemptible RCU. */
struct rcu_preempt_ctrlblk {
	struct rcu_ctrlblk rcb;	/* curtail: ->next ptr of last CB for GP. */
	struct rcu_head **nexttail;
				/* Tasks blocked in a preemptible-RCU */
				/*  read-side critical section while a */
				/*  preemptible-RCU grace period is in */
				/*  progress must wait for a later grace */
				/*  period.  This pointer points to the */
				/*  ->next pointer of the last callback that */
				/*  must wait for a later grace period, or */
				/*  to &->rcb.rcucblist if there is no */
				/*  such callback. */
	struct list_head blkd_tasks;
				/* Tasks blocked in RCU read-side critical */
				/*  section.  Tasks are placed at the head */
				/*  of this list and age towards the tail. */
	struct list_head *gp_tasks;
				/* Pointer to the first task blocking the */
				/*  current grace period, or NULL if there */
				/*  is no such task. */
	struct list_head *exp_tasks;
				/* Pointer to the first task blocking the */
				/*  current expedited grace period, or NULL */
				/*  if there is no such task.  If there is */
				/*  no current expedited grace period, then */
				/*  there cannot be any such task. */
#ifdef CONFIG_RCU_BOOST
	struct list_head *boost_tasks;
				/* Pointer to first task that needs to be */
				/*  priority boosted, or NULL if no priority */
				/*  boosting is needed.  If there is no */
				/*  current or expedited grace period, there */
				/*  can be no such task. */
#endif /* #ifdef CONFIG_RCU_BOOST */
	u8 gpnum;		/* Current grace period. */
	u8 gpcpu;		/* Last grace period blocked by the CPU. */
	u8 completed;		/* Last grace period completed. */
				/*  If all three are equal, RCU is idle. */
#ifdef CONFIG_RCU_BOOST
	unsigned long boost_time; /* When to start boosting (jiffies). */
#endif /* #ifdef CONFIG_RCU_BOOST */
#ifdef CONFIG_RCU_TRACE
	unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
	unsigned long n_tasks_boosted;
				/* Total number of tasks boosted. */
	unsigned long n_exp_boosts;
				/* Number of tasks boosted for expedited GPs. */
	unsigned long n_normal_boosts;
				/* Number of tasks boosted for normal GPs. */
	unsigned long n_balk_blkd_tasks;
				/* Refused to boost: no blocked tasks. */
	unsigned long n_balk_exp_gp_tasks;
				/* Refused to boost: nothing blocking GP. */
	unsigned long n_balk_boost_tasks;
				/* Refused to boost: already boosting. */
	unsigned long n_balk_notyet;
				/* Refused to boost: not yet time to boost. */
	unsigned long n_balk_nos;
				/* Refused to boost: not starting boost, */
				/*  reason unaccounted for. */
#endif /* #ifdef CONFIG_RCU_BOOST */
#endif /* #ifdef CONFIG_RCU_TRACE */
};

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
	.rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
	.blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
	return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be but one running RCU reader at a time.
 */
static int rcu_preempt_running_reader(void)
{
	return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_any(void)
{
	return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 * If the caller needs a reliable answer, it must disable hard irqs.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
	return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
	return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 * The caller must disable hardirqs.
 */
static int rcu_preempt_gp_in_progress(void)
{
	return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, instead
 * returning NULL if this pointer is at the end of the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t)
{
	struct list_head *np;

	np = t->rcu_node_entry.next;
	if (np == &rcu_preempt_ctrlblk.blkd_tasks)
		np = NULL;
	return np;
}

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
#endif /* #ifdef CONFIG_RCU_BOOST */

/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
	seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
		   rcu_preempt_ctrlblk.rcb.qlen,
		   rcu_preempt_ctrlblk.n_grace_periods,
		   rcu_preempt_ctrlblk.gpnum,
		   rcu_preempt_ctrlblk.gpcpu,
		   rcu_preempt_ctrlblk.completed,
		   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
		   "N."[!rcu_preempt_ctrlblk.gp_tasks],
		   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
	seq_printf(m, "%sttb=%c ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
		   " ",
		   "B."[!rcu_preempt_ctrlblk.boost_tasks],
		   rcu_preempt_ctrlblk.n_tasks_boosted,
		   rcu_preempt_ctrlblk.n_exp_boosts,
		   rcu_preempt_ctrlblk.n_normal_boosts,
		   (int)(jiffies & 0xffff),
		   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
	seq_printf(m, "%s: nt=%lu egt=%lu bt=%lu ny=%lu nos=%lu\n",
		   " balk",
		   rcu_preempt_ctrlblk.n_balk_blkd_tasks,
		   rcu_preempt_ctrlblk.n_balk_exp_gp_tasks,
		   rcu_preempt_ctrlblk.n_balk_boost_tasks,
		   rcu_preempt_ctrlblk.n_balk_notyet,
		   rcu_preempt_ctrlblk.n_balk_nos);
#endif /* #ifdef CONFIG_RCU_BOOST */
}

#endif /* #ifdef CONFIG_RCU_TRACE */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"

/*
 * Carry out RCU priority boosting on the task indicated by ->boost_tasks
 * or ->exp_tasks.  Return non-zero if there are still tasks needing to be
 * boosted once this one has been dealt with.
 */
static int rcu_boost(void)
{
	unsigned long flags;
	struct rt_mutex mtx;
	struct task_struct *t;
	struct list_head *tb;

	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL)
		return 0;  /* Nothing to boost. */

	raw_local_irq_save(flags);

	/*
	 * Recheck with irqs disabled: all tasks in need of boosting
	 * might exit their RCU read-side critical sections first,
	 * in which case boosting makes no sense.
	 */
	if (rcu_preempt_ctrlblk.boost_tasks == NULL &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		raw_local_irq_restore(flags);
		return 0;
	}

	/*
	 * Preferentially boost tasks blocking expedited grace periods.
	 * This cannot starve the normal grace periods because a second
	 * expedited grace period must boost all blocked tasks, including
	 * those blocking the pre-existing normal grace period.
	 */
	if (rcu_preempt_ctrlblk.exp_tasks != NULL) {
		tb = rcu_preempt_ctrlblk.exp_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
	} else {
		tb = rcu_preempt_ctrlblk.boost_tasks;
		RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
	}
	RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);

	/*
	 * We boost task t by manufacturing an rt_mutex that appears to
	 * be held by task t.  We leave a pointer to that rt_mutex where
	 * task t can find it, and task t will release the mutex when it
	 * exits its outermost RCU read-side critical section.  Then
	 * simply acquiring this artificial rt_mutex will boost task
	 * t's priority.
	 */
	t = container_of(tb, struct task_struct, rcu_node_entry);
	rt_mutex_init_proxy_locked(&mtx, t);
	t->rcu_boost_mutex = &mtx;
	t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
	raw_local_irq_restore(flags);
	rt_mutex_lock(&mtx);
	rt_mutex_unlock(&mtx);  /* Keep lockdep happy. */

	return rcu_preempt_ctrlblk.boost_tasks != NULL ||
	       rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Check to see if it is time to start boosting RCU readers blocking
 * the current grace period, and, if so, tell the RCU kthread to
 * start the boosting.
 *
 * Return 0 if nothing is blocking the current grace period or an
 * expedited grace period, otherwise return non-zero.  This return
 * value is independent of whether or not boosting was initiated.
 */
static int rcu_initiate_boost(void)
{
	if (!rcu_preempt_blocked_readers_cgp() &&
	    rcu_preempt_ctrlblk.exp_tasks == NULL) {
		RCU_TRACE(rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++);
		return 0;
	}
	if (rcu_preempt_ctrlblk.exp_tasks != NULL ||
	    (rcu_preempt_ctrlblk.gp_tasks != NULL &&
	     rcu_preempt_ctrlblk.boost_tasks == NULL &&
	     ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))) {
		if (rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_preempt_ctrlblk.boost_tasks =
				rcu_preempt_ctrlblk.gp_tasks;
		invoke_rcu_kthread();
	} else
		RCU_TRACE(rcu_initiate_boost_trace());
	return 1;
}

#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)

/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
	rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
}

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't boost.
 */
static int rcu_boost(void)
{
	return 0;
}

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
	return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */

/*
 * Record a preemptible-RCU quiescent state for the current CPU.  Note
 * that this just means that the task currently running on the CPU is
 * in a quiescent state: there might be any number of tasks blocked
 * while in an RCU read-side critical section.
 *
 * Unlike the other rcu_*_qs() functions, callers to this function
 * must disable irqs in order to protect the assignment to
 * ->rcu_read_unlock_special.
 *
 * Because this is a single-CPU implementation, a quiescent state from
 * the running task is all that is needed to end a grace period, provided
 * no reader remains queued on ->blkd_tasks from before the grace period
 * started.  If the grace period has ended, advance the callback lists
 * and kick the RCU kthread if callbacks are ready to be invoked.
 */
static void rcu_preempt_cpu_qs(void)
{
	/* Record both CPU and task as having responded to current GP. */
	rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
	current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

	/* If there is no GP then there is nothing more to do. */
	if (!rcu_preempt_gp_in_progress())
		return;

	/*
	 * Check up on boosting.  If there are readers blocking the
	 * current grace period, leave.
	 */
	if (rcu_initiate_boost())
		return;

	/* Advance callbacks. */
	rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
	rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
	rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

	/* If there are no blocked readers, next GP is done instantly. */
	if (!rcu_preempt_blocked_readers_any())
		rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

	/* If there are done callbacks, cause them to be invoked. */
	if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
		invoke_rcu_kthread();
}

/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */
static void rcu_preempt_start_gp(void)
{
	if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

		/* Official start of GP. */
		rcu_preempt_ctrlblk.gpnum++;
		RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

		/* Any blocked RCU readers block new GP. */
		if (rcu_preempt_blocked_readers_any())
			rcu_preempt_ctrlblk.gp_tasks =
				rcu_preempt_ctrlblk.blkd_tasks.next;

		/* Set up for RCU priority boosting. */
		rcu_preempt_boost_start_gp();

		/* If there is no running reader, CPU is done with GP. */
		if (!rcu_preempt_running_reader())
			rcu_preempt_cpu_qs();
	}
}

/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the ->blkd_tasks list.
 * The task will dequeue itself when it exits the outermost enclosing
 * RCU read-side critical section, so a grace period that it blocks
 * cannot be permitted to complete until then.
 */
void rcu_preempt_note_context_switch(void)
{
	struct task_struct *t = current;
	unsigned long flags;

	local_irq_save(flags); /* must exclude scheduler_tick(). */
	if (rcu_preempt_running_reader() &&
	    (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

		/* Possibly blocking in an RCU read-side critical section. */
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Tasks are added at the head of ->blkd_tasks, so they
		 * age towards the tail.  If this CPU has already passed
		 * through a quiescent state for the current grace period,
		 * this task can hold up only the *next* grace period;
		 * otherwise, point ->gp_tasks at the new entry so that
		 * the current grace period waits for it as well.
		 */
		list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
		if (rcu_cpu_blocking_cur_gp())
			rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
	}

	/*
	 * Either we were not in an RCU read-side critical section to
	 * begin with, or we have now recorded that critical section
	 * globally.  Either way, we can now note a quiescent state
	 * for this CPU.  Again, if we were in an RCU read-side critical
	 * section, and if that critical section was blocking the current
	 * grace period, then the fact that the task has been enqueued
	 * means that we continue to block the current grace period.
	 */
	rcu_preempt_cpu_qs();
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be
 * updated if we block.
 */
void __rcu_read_lock(void)
{
	current->rcu_read_lock_nesting++;
	barrier();  /* needed if we ever invoke rcu_read_lock in rcutiny.c */
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);

/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked during the
 * RCU read-side critical section.
 */
static void rcu_read_unlock_special(struct task_struct *t)
{
	int empty;
	int empty_exp;
	unsigned long flags;
	struct list_head *np;
	int special;

	/*
	 * NMI handlers cannot block and cannot safely manipulate state.
	 * They therefore cannot possibly be special, so just leave.
	 */
	if (in_nmi())
		return;

	local_irq_save(flags);

	/*
	 * If RCU core is waiting for this CPU to exit its critical
	 * section, report the fact that we have done so.
	 */
	special = t->rcu_read_unlock_special;
	if (special & RCU_READ_UNLOCK_NEED_QS)
		rcu_preempt_cpu_qs();

	/* Hardware IRQ handlers cannot block. */
	if (in_irq()) {
		local_irq_restore(flags);
		return;
	}

	/* Clean up if blocked during RCU read-side critical section. */
	if (special & RCU_READ_UNLOCK_BLOCKED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

		/*
		 * Remove this task from the ->blkd_tasks list and adjust
		 * any pointers that might have been referencing it.
		 */
		empty = !rcu_preempt_blocked_readers_cgp();
		empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
		np = rcu_next_node_entry(t);
		list_del_init(&t->rcu_node_entry);
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
			rcu_preempt_ctrlblk.gp_tasks = np;
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
			rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
		if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
			rcu_preempt_ctrlblk.boost_tasks = np;
#endif /* #ifdef CONFIG_RCU_BOOST */

		/*
		 * If this was the last task blocking the current grace
		 * period, report the quiescent state and start a new
		 * grace period if needed.
		 */
		if (!empty && !rcu_preempt_blocked_readers_cgp()) {
			rcu_preempt_cpu_qs();
			rcu_preempt_start_gp();
		}

		/*
		 * If this was the last task blocking the expedited
		 * grace period, wake up the waiting task.
		 */
		if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
			rcu_report_exp_done();
	}
#ifdef CONFIG_RCU_BOOST
	/* Unboost self if was boosted. */
	if (special & RCU_READ_UNLOCK_BOOSTED) {
		t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
		rt_mutex_unlock(t->rcu_boost_mutex);
		t->rcu_boost_mutex = NULL;
	}
#endif /* #ifdef CONFIG_RCU_BOOST */
	local_irq_restore(flags);
}

/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting.  If the result is zero (outermost
 * rcu_read_unlock()) and ->rcu_read_unlock_special is non-zero, then
 * invoke rcu_read_unlock_special() to clean up after a context switch
 * in an RCU read-side critical section and other special cases.
 */
void __rcu_read_unlock(void)
{
	struct task_struct *t = current;

	barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
	--t->rcu_read_lock_nesting;
	barrier();  /* decrement before load of ->rcu_read_unlock_special */
	if (t->rcu_read_lock_nesting == 0 &&
	    unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
		rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
	WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif /* #ifdef CONFIG_PROVE_LOCKING */
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
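
/*
 * Illustrative reader-side usage (hypothetical snippet, not part of this
 * file): readers bracket their accesses with rcu_read_lock() and
 * rcu_read_unlock(), which map onto the two functions above:
 *
 *	rcu_read_lock();
 *	p = rcu_dereference(gp);
 *	if (p != NULL)
 *		do_something_with(p);
 *	rcu_read_unlock();
 */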

/*
 * Check for a quiescent state from the current CPU.  This function
 * is invoked from the scheduling-clock interrupt.
 *
 * Caller must disable hard irqs.
 */
static void rcu_preempt_check_callbacks(void)
{
	struct task_struct *t = current;

	if (rcu_preempt_gp_in_progress() &&
	    (!rcu_preempt_running_reader() ||
	     !rcu_cpu_blocking_cur_gp()))
		rcu_preempt_cpu_qs();
	if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
	    rcu_preempt_ctrlblk.rcb.donetail)
		invoke_rcu_kthread();
	if (rcu_preempt_gp_in_progress() &&
	    rcu_cpu_blocking_cur_gp() &&
	    rcu_preempt_running_reader())
		t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}

/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer to
 * update, so this hook is invoked from rcu_process_callbacks() when
 * the "done" callbacks are spliced out of the list: if ->nexttail
 * referenced the spliced-out segment, pull it back to the list header.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
	if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
		rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
	rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}

/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
	unsigned long flags;

	debug_rcu_head_queue(head);
	head->func = func;
	head->next = NULL;

	local_irq_save(flags);
	*rcu_preempt_ctrlblk.nexttail = head;
	rcu_preempt_ctrlblk.nexttail = &head->next;
	RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
	rcu_preempt_start_gp();  /* start GP if needed. */
	local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);
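
/*
 * Illustrative updater-side usage (hypothetical types and callback, not
 * part of this file): publish a new version of a structure, then use
 * call_rcu() to free the old one once all pre-existing readers are done:
 *
 *	struct foo *old = gp;
 *
 *	rcu_assign_pointer(gp, new);
 *	if (old != NULL)
 *		call_rcu(&old->rcu_head, foo_reclaim);
 */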

/*
 * Wait until all in-flight preemptible-RCU callbacks have been invoked:
 * queue a callback of our own and wait for it, which suffices because
 * callbacks are invoked in queue order on this single CPU.
 */
void rcu_barrier(void)
{
	struct rcu_synchronize rcu;

	init_rcu_head_on_stack(&rcu.head);
	init_completion(&rcu.completion);
	/* Will wake me after RCU finished. */
	call_rcu(&rcu.head, wakeme_after_rcu);
	/* Wait for it. */
	wait_for_completion(&rcu.completion);
	destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);

/*
 * Wait for a grace period to elapse.  It is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section, so
 * any legal call to synchronize_rcu() is itself a quiescent state.  On
 * this single-CPU implementation, the grace period therefore needs to
 * wait only for tasks that blocked within a preceding read-side
 * critical section; if there are none, we are done.
 */
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
	if (!rcu_scheduler_active)
		return;
#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

	WARN_ON_ONCE(rcu_preempt_running_reader());
	if (!rcu_preempt_blocked_readers_any())
		return;

	/* Once we get past the fastpath checks, same code as rcu_barrier(). */
	rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);
738
739static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
740static unsigned long sync_rcu_preempt_exp_count;
741static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);
742
743
744
745
746
747
748
749static int rcu_preempted_readers_exp(void)
750{
751 return rcu_preempt_ctrlblk.exp_tasks != NULL;
752}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited
 * preemptible-RCU grace period.
 */
static void rcu_report_exp_done(void)
{
	wake_up(&sync_rcu_preempt_exp_wq);
}

/*
 * Wait for an rcu-preempt grace period, but expedite it.  The basic idea
 * is to rely on the fact that there is but one CPU, and that it is
 * illegal for a task to invoke synchronize_rcu_expedited() while in a
 * preemptible-RCU read-side critical section.  Therefore, any such
 * critical sections must correspond to blocked tasks, which must
 * therefore be on the ->blkd_tasks list.  So just record the current
 * head of the list in ->exp_tasks, and wait for this list to drain.
 */
void synchronize_rcu_expedited(void)
{
	unsigned long flags;
	struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
	unsigned long snap;

	barrier(); /* ensure prior action seen before grace period. */

	WARN_ON_ONCE(rcu_preempt_running_reader());

	/*
	 * Acquire the lock so that there is only one preemptible-RCU
	 * expedited grace period in flight.  Of course, if someone does
	 * our work for us while we are acquiring the lock, just leave.
	 */
	snap = sync_rcu_preempt_exp_count + 1;
	mutex_lock(&sync_rcu_preempt_exp_mutex);
	if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
		goto unlock_mb_ret; /* Others did our work for us. */

	local_irq_save(flags);

	/*
	 * All RCU readers have to already be on ->blkd_tasks because
	 * we cannot legally be executing in an RCU read-side critical
	 * section.  Snapshot the current head of the list; if the list
	 * is empty, there is nothing blocking this expedited GP.
	 */
	rpcp->exp_tasks = rpcp->blkd_tasks.next;
	if (rpcp->exp_tasks == &rpcp->blkd_tasks)
		rpcp->exp_tasks = NULL;

	/* Wait for the ->blkd_tasks snapshot to drain. */
	if (!rcu_preempted_readers_exp())
		local_irq_restore(flags);
	else {
		rcu_initiate_boost();
		local_irq_restore(flags);
		wait_event(sync_rcu_preempt_exp_wq,
			   !rcu_preempted_readers_exp());
	}

	/* Clean up and exit. */
	barrier(); /* ensure expedited GP seen before counter increment. */
	sync_rcu_preempt_exp_count++;
unlock_mb_ret:
	mutex_unlock(&sync_rcu_preempt_exp_mutex);
	barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);
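
/*
 * Note that synchronize_rcu_expedited() can be used wherever
 * synchronize_rcu() can, trading increased CPU overhead (and, with
 * RCU_BOOST, possible priority boosting) for reduced grace-period latency.
 */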

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
	if (!rcu_preempt_running_reader())
		rcu_preempt_cpu_qs();
	return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, clean up if so.  No need to issue warnings, as
 * debug_check_no_locks_held() already does this if lockdep is enabled.
 */
void exit_rcu(void)
{
	struct task_struct *t = current;

	if (t->rcu_read_lock_nesting == 0)
		return;
	t->rcu_read_lock_nesting = 1;
	__rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_TRACE

/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Because preemptible RCU does not exist, it never has any tasks
 * to boost.
 */
static int rcu_boost(void)
{
	return 0;
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else /* #ifdef CONFIG_RCU_BOOST */
#define RCU_BOOST_PRIO 1
#endif /* #else #ifdef CONFIG_RCU_BOOST */

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST

/* Attribute a refusal to boost to one of the ->n_balk_* counters. */
static void rcu_initiate_boost_trace(void)
{
	if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
		rcu_preempt_ctrlblk.n_balk_blkd_tasks++;
	else if (rcu_preempt_ctrlblk.gp_tasks == NULL &&
		 rcu_preempt_ctrlblk.exp_tasks == NULL)
		rcu_preempt_ctrlblk.n_balk_exp_gp_tasks++;
	else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
		rcu_preempt_ctrlblk.n_balk_boost_tasks++;
	else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
		rcu_preempt_ctrlblk.n_balk_notyet++;
	else
		rcu_preempt_ctrlblk.n_balk_nos++;
}

#endif /* #ifdef CONFIG_RCU_BOOST */

/* Subtract n from the callback queue length for tracing purposes. */
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	raw_local_irq_save(flags);
	rcp->qlen -= n;
	raw_local_irq_restore(flags);
}

/*
 * Dump statistics for the various flavors of tiny RCU.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	show_tiny_preempt_stats(m);
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
	.owner = THIS_MODULE,
	.open = show_tiny_stats_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = single_release,
};

static struct dentry *rcudir;

/* Create the debugfs "rcu" directory and the "rcudata" statistics file. */
static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}

static void __exit rcutiny_trace_cleanup(void)
{
	debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

#endif /* #ifdef CONFIG_RCU_TRACE */