/*
 * Tiny RCU (TINY_RCU / TINY_PREEMPT_RCU) internal definitions: classic and
 * preemptible flavors of the uniprocessor Read-Copy Update implementation.
 */
#include <linux/kthread.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

#ifdef CONFIG_RCU_TRACE
#define RCU_TRACE(stmt) stmt
#else
#define RCU_TRACE(stmt)
#endif

/* Control block for one flavor of Tiny RCU (sched, bh, or preempt). */
struct rcu_ctrlblk {
        struct rcu_head *rcucblist;     /* List of pending callbacks (CBs). */
        struct rcu_head **donetail;     /* ->next pointer of last "done" CB. */
        struct rcu_head **curtail;      /* ->next pointer of last CB. */
        RCU_TRACE(long qlen);           /* Number of pending CBs. */
};

/* Definition for the rcu_sched flavor. */
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
        .donetail = &rcu_sched_ctrlblk.rcucblist,
        .curtail = &rcu_sched_ctrlblk.rcucblist,
};

/* Definition for the rcu_bh flavor. */
static struct rcu_ctrlblk rcu_bh_ctrlblk = {
        .donetail = &rcu_bh_ctrlblk.rcucblist,
        .curtail = &rcu_bh_ctrlblk.rcucblist,
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);
#endif

#ifdef CONFIG_TINY_PREEMPT_RCU

#include <linux/delay.h>

/* Global control block for preemptible Tiny RCU. */
struct rcu_preempt_ctrlblk {
        struct rcu_ctrlblk rcb;         /* curtail: ->next ptr of last CB for GP. */
        struct rcu_head **nexttail;     /* ->next ptr of last CB that must wait */
                                        /*  for a later grace period, or */
                                        /*  &rcb.rcucblist if there is none. */
        struct list_head blkd_tasks;    /* Tasks blocked in an RCU read-side */
                                        /*  critical section; added at the head, */
                                        /*  they age towards the tail. */
        struct list_head *gp_tasks;     /* First task blocking the current */
                                        /*  grace period, or NULL if none. */
        struct list_head *exp_tasks;    /* First task blocking the current */
                                        /*  expedited grace period, or NULL. */
#ifdef CONFIG_RCU_BOOST
        struct list_head *boost_tasks;  /* First task needing priority boosting, */
                                        /*  or NULL if no boosting is needed. */
#endif
        u8 gpnum;                       /* Current grace period. */
        u8 gpcpu;                       /* Last grace period for which the CPU */
                                        /*  noted a quiescent state. */
        u8 completed;                   /* Last completed grace period; equal to */
                                        /*  gpnum when no grace period is in progress. */
#ifdef CONFIG_RCU_BOOST
        s8 boosted_this_gp;             /* Boosting state for this grace period: */
                                        /*  -1 expedited, 0 not yet, 1 begun, 2 done. */
        unsigned long boost_time;       /* When (in jiffies) to start boosting. */
#endif
#ifdef CONFIG_RCU_TRACE
        unsigned long n_grace_periods;
#ifdef CONFIG_RCU_BOOST
        unsigned long n_tasks_boosted;
        unsigned long n_exp_boosts;
        unsigned long n_normal_boosts;
        unsigned long n_normal_balk_blkd_tasks;
        unsigned long n_normal_balk_gp_tasks;
        unsigned long n_normal_balk_boost_tasks;
        unsigned long n_normal_balk_boosted;
        unsigned long n_normal_balk_notyet;
        unsigned long n_normal_balk_nos;
        unsigned long n_exp_balk_blkd_tasks;
        unsigned long n_exp_balk_nos;
#endif
#endif
};

static struct rcu_preempt_ctrlblk rcu_preempt_ctrlblk = {
        .rcb.donetail = &rcu_preempt_ctrlblk.rcb.rcucblist,
        .rcb.curtail = &rcu_preempt_ctrlblk.rcb.rcucblist,
        .nexttail = &rcu_preempt_ctrlblk.rcb.rcucblist,
        .blkd_tasks = LIST_HEAD_INIT(rcu_preempt_ctrlblk.blkd_tasks),
};

static int rcu_preempted_readers_exp(void);
static void rcu_report_exp_done(void);

/*
 * Return true if the CPU has not yet responded to the current grace period.
 */
static int rcu_cpu_blocking_cur_gp(void)
{
        return rcu_preempt_ctrlblk.gpcpu != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Check for a running RCU reader.  Because there is only one CPU,
 * there can be at most one running RCU reader at a time.
 */
static int rcu_preempt_running_reader(void)
{
        return current->rcu_read_lock_nesting;
}

/*
 * Check for preempted RCU readers blocking any grace period.
 */
static int rcu_preempt_blocked_readers_any(void)
{
        return !list_empty(&rcu_preempt_ctrlblk.blkd_tasks);
}

/*
 * Check for preempted RCU readers blocking the current grace period.
 */
static int rcu_preempt_blocked_readers_cgp(void)
{
        return rcu_preempt_ctrlblk.gp_tasks != NULL;
}

/*
 * Return true if another preemptible-RCU grace period is needed.
 */
static int rcu_preempt_needs_another_gp(void)
{
        return *rcu_preempt_ctrlblk.rcb.curtail != NULL;
}

/*
 * Return true if a preemptible-RCU grace period is in progress.
 */
static int rcu_preempt_gp_in_progress(void)
{
        return rcu_preempt_ctrlblk.completed != rcu_preempt_ctrlblk.gpnum;
}

/*
 * Advance a ->blkd_tasks-list pointer to the next entry, returning NULL
 * if this entry is the last one on the list.
 */
static struct list_head *rcu_next_node_entry(struct task_struct *t)
{
        struct list_head *np;

        np = t->rcu_node_entry.next;
        if (np == &rcu_preempt_ctrlblk.blkd_tasks)
                np = NULL;
        return np;
}

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST
static void rcu_initiate_boost_trace(void);
static void rcu_initiate_exp_boost_trace(void);
#endif

/*
 * Dump additional statistics for TINY_PREEMPT_RCU.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
        seq_printf(m, "rcu_preempt: qlen=%ld gp=%lu g%u/p%u/c%u tasks=%c%c%c\n",
                   rcu_preempt_ctrlblk.rcb.qlen,
                   rcu_preempt_ctrlblk.n_grace_periods,
                   rcu_preempt_ctrlblk.gpnum,
                   rcu_preempt_ctrlblk.gpcpu,
                   rcu_preempt_ctrlblk.completed,
                   "T."[list_empty(&rcu_preempt_ctrlblk.blkd_tasks)],
                   "N."[!rcu_preempt_ctrlblk.gp_tasks],
                   "E."[!rcu_preempt_ctrlblk.exp_tasks]);
#ifdef CONFIG_RCU_BOOST
        seq_printf(m, " ttb=%c btg=",
                   "B."[!rcu_preempt_ctrlblk.boost_tasks]);
        switch (rcu_preempt_ctrlblk.boosted_this_gp) {
        case -1:
                seq_puts(m, "exp");
                break;
        case 0:
                seq_puts(m, "no");
                break;
        case 1:
                seq_puts(m, "begun");
                break;
        case 2:
                seq_puts(m, "done");
                break;
        default:
                seq_printf(m, "?%d?", rcu_preempt_ctrlblk.boosted_this_gp);
        }
        seq_printf(m, " ntb=%lu neb=%lu nnb=%lu j=%04x bt=%04x\n",
                   rcu_preempt_ctrlblk.n_tasks_boosted,
                   rcu_preempt_ctrlblk.n_exp_boosts,
                   rcu_preempt_ctrlblk.n_normal_boosts,
                   (int)(jiffies & 0xffff),
                   (int)(rcu_preempt_ctrlblk.boost_time & 0xffff));
        seq_printf(m, " %s: nt=%lu gt=%lu bt=%lu b=%lu ny=%lu nos=%lu\n",
                   "normal balk",
                   rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks,
                   rcu_preempt_ctrlblk.n_normal_balk_gp_tasks,
                   rcu_preempt_ctrlblk.n_normal_balk_boost_tasks,
                   rcu_preempt_ctrlblk.n_normal_balk_boosted,
                   rcu_preempt_ctrlblk.n_normal_balk_notyet,
                   rcu_preempt_ctrlblk.n_normal_balk_nos);
        seq_printf(m, " exp balk: bt=%lu nos=%lu\n",
                   rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks,
                   rcu_preempt_ctrlblk.n_exp_balk_nos);
#endif
}

#endif /* #ifdef CONFIG_RCU_TRACE */

#ifdef CONFIG_RCU_BOOST

#include "rtmutex_common.h"
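/*
 * Priority-boost the task indicated by ->boost_tasks: make it the owner
 * of a proxy-locked rt_mutex and then acquire that mutex, so that
 * rt_mutex priority inheritance boosts the task until it exits its RCU
 * read-side critical section.  Returns non-zero if there is still a
 * task needing to be boosted.
 */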
static int rcu_boost(void)
{
        unsigned long flags;
        struct rt_mutex mtx;
        struct list_head *np;
        struct task_struct *t;

        if (rcu_preempt_ctrlblk.boost_tasks == NULL)
                return 0;  /* Nothing to boost. */
        raw_local_irq_save(flags);
        rcu_preempt_ctrlblk.boosted_this_gp++;
        t = container_of(rcu_preempt_ctrlblk.boost_tasks, struct task_struct,
                         rcu_node_entry);
        np = rcu_next_node_entry(t);
        rt_mutex_init_proxy_locked(&mtx, t);
        t->rcu_boost_mutex = &mtx;
        t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BOOSTED;
        raw_local_irq_restore(flags);
        rt_mutex_lock(&mtx);  /* Side effect: boosts task t's priority. */
        RCU_TRACE(rcu_preempt_ctrlblk.n_tasks_boosted++);
        rcu_preempt_ctrlblk.boosted_this_gp++;
        rt_mutex_unlock(&mtx);
        return rcu_preempt_ctrlblk.boost_tasks != NULL;
}
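/*
 * Check to see if it is now time to start boosting RCU readers blocking
 * the current grace period, and, if so, wake the RCU kthread so that it
 * starts boosting them.
 *
 * Returns 1 if there are readers blocking the current grace period,
 * zero otherwise.
 */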
static int rcu_initiate_boost(void)
{
        if (!rcu_preempt_blocked_readers_cgp()) {
                RCU_TRACE(rcu_preempt_ctrlblk.n_normal_balk_blkd_tasks++);
                return 0;
        }
        if (rcu_preempt_ctrlblk.gp_tasks != NULL &&
            rcu_preempt_ctrlblk.boost_tasks == NULL &&
            rcu_preempt_ctrlblk.boosted_this_gp == 0 &&
            ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time)) {
                rcu_preempt_ctrlblk.boost_tasks = rcu_preempt_ctrlblk.gp_tasks;
                invoke_rcu_kthread();
                RCU_TRACE(rcu_preempt_ctrlblk.n_normal_boosts++);
        } else
                RCU_TRACE(rcu_initiate_boost_trace());
        return 1;
}
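/*
 * Initiate boosting for an expedited grace period: point ->boost_tasks
 * at the head of the ->blkd_tasks list and wake the RCU kthread.
 */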
static void rcu_initiate_expedited_boost(void)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        if (!list_empty(&rcu_preempt_ctrlblk.blkd_tasks)) {
                rcu_preempt_ctrlblk.boost_tasks =
                        rcu_preempt_ctrlblk.blkd_tasks.next;
                rcu_preempt_ctrlblk.boosted_this_gp = -1;
                invoke_rcu_kthread();
                RCU_TRACE(rcu_preempt_ctrlblk.n_exp_boosts++);
        } else
                RCU_TRACE(rcu_initiate_exp_boost_trace());
        raw_local_irq_restore(flags);
}
/* Delay (in jiffies) from grace-period start until boosting may begin. */
#define RCU_BOOST_DELAY_JIFFIES DIV_ROUND_UP(CONFIG_RCU_BOOST_DELAY * HZ, 1000)
/*
 * Do priority-boost accounting for the start of a new grace period.
 */
static void rcu_preempt_boost_start_gp(void)
{
        rcu_preempt_ctrlblk.boost_time = jiffies + RCU_BOOST_DELAY_JIFFIES;
        if (rcu_preempt_ctrlblk.boosted_this_gp > 0)
                rcu_preempt_ctrlblk.boosted_this_gp = 0;
}

#else /* #ifdef CONFIG_RCU_BOOST */

/*
 * If there is no RCU priority boosting, we don't boost.
 */
static int rcu_boost(void)
{
        return 0;
}

/*
 * If there is no RCU priority boosting, we don't initiate boosting,
 * but we do indicate whether there are blocked readers blocking the
 * current grace period.
 */
static int rcu_initiate_boost(void)
{
        return rcu_preempt_blocked_readers_cgp();
}

/*
 * If there is no RCU priority boosting, we don't initiate expedited boosting.
 */
static void rcu_initiate_expedited_boost(void)
{
}

/*
 * If there is no RCU priority boosting, nothing to do at grace-period start.
 */
static void rcu_preempt_boost_start_gp(void)
{
}

#endif /* #else #ifdef CONFIG_RCU_BOOST */
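/*
 * Record a preemptible-RCU quiescent state for this CPU.  Note that this
 * just means that the task currently running on the CPU is in a quiescent
 * state; there might be any number of tasks blocked while in an RCU
 * read-side critical section.  Callers must have irqs disabled to protect
 * the update of ->rcu_read_unlock_special.  If this ends the current
 * grace period, advance the callback lists and, if callbacks are now
 * ready, wake the RCU kthread to invoke them.
 */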
static void rcu_preempt_cpu_qs(void)
{
        /* Record both the time and the fact of the quiescent state. */
        rcu_preempt_ctrlblk.gpcpu = rcu_preempt_ctrlblk.gpnum;
        current->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_NEED_QS;

        /* If there is no grace period in progress, nothing more to do. */
        if (!rcu_preempt_gp_in_progress())
                return;

        /*
         * Check up on boosting.  If there are still readers blocking the
         * current grace period, leave.
         */
        if (rcu_initiate_boost())
                return;

        /* Advance callbacks. */
        rcu_preempt_ctrlblk.completed = rcu_preempt_ctrlblk.gpnum;
        rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.rcb.curtail;
        rcu_preempt_ctrlblk.rcb.curtail = rcu_preempt_ctrlblk.nexttail;

        /* If there are no blocked readers, the next grace period is done. */
        if (!rcu_preempt_blocked_readers_any())
                rcu_preempt_ctrlblk.rcb.donetail = rcu_preempt_ctrlblk.nexttail;

        /* If there are "done" callbacks, cause them to be invoked. */
        if (*rcu_preempt_ctrlblk.rcb.donetail != NULL)
                invoke_rcu_kthread();
}
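/*
 * Start a new RCU grace period if warranted.  Hard irqs must be disabled.
 */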
static void rcu_preempt_start_gp(void)
{
        if (!rcu_preempt_gp_in_progress() && rcu_preempt_needs_another_gp()) {

                /* Official start of grace period. */
                rcu_preempt_ctrlblk.gpnum++;
                RCU_TRACE(rcu_preempt_ctrlblk.n_grace_periods++);

                /* Any blocked RCU readers block the new grace period. */
                if (rcu_preempt_blocked_readers_any())
                        rcu_preempt_ctrlblk.gp_tasks =
                                rcu_preempt_ctrlblk.blkd_tasks.next;

                /* Set up for RCU priority boosting. */
                rcu_preempt_boost_start_gp();

                /* If there is no running reader, the CPU is done with the GP. */
                if (!rcu_preempt_running_reader())
                        rcu_preempt_cpu_qs();
        }
}
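/*
 * We have entered the scheduler, and the current task might soon be
 * context-switched away from.  If this task is in an RCU read-side
 * critical section, we will no longer be able to rely on the CPU to
 * record that fact, so we enqueue the task on the ->blkd_tasks list.
 * If the CPU has not yet passed through a quiescent state for the
 * current grace period, the task now blocks that grace period, so
 * ->gp_tasks is pointed at its entry.  Either way, a quiescent state
 * is then recorded for this CPU.
 */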
void rcu_preempt_note_context_switch(void)
{
        struct task_struct *t = current;
        unsigned long flags;

        local_irq_save(flags); /* must exclude scheduler_tick(). */
        if (rcu_preempt_running_reader() &&
            (t->rcu_read_unlock_special & RCU_READ_UNLOCK_BLOCKED) == 0) {

                /* Possibly blocking in an RCU read-side critical section. */
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Queue this task on the blocked-tasks list.  If the CPU
                 * has not yet passed through a quiescent state for the
                 * current grace period, this task is also blocking that
                 * grace period, so point ->gp_tasks at it; as long as it
                 * remains queued, the current grace period cannot end.
                 */
                list_add(&t->rcu_node_entry, &rcu_preempt_ctrlblk.blkd_tasks);
                if (rcu_cpu_blocking_cur_gp())
                        rcu_preempt_ctrlblk.gp_tasks = &t->rcu_node_entry;
        }

        /*
         * Either we were not in an RCU read-side critical section to
         * begin with, or we have now recorded that critical section
         * globally.  Either way, we can now note a quiescent state
         * for this CPU.
         */
        rcu_preempt_cpu_qs();
        local_irq_restore(flags);
}
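/*
 * Tiny-preemptible RCU implementation for rcu_read_lock().
 * Just increment ->rcu_read_lock_nesting; shared state will be
 * updated if we block.
 */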
void __rcu_read_lock(void)
{
        current->rcu_read_lock_nesting++;
        barrier();
}
EXPORT_SYMBOL_GPL(__rcu_read_lock);
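/*
 * Handle special cases during rcu_read_unlock(), such as needing to
 * notify RCU core processing or the task having blocked while in the
 * RCU read-side critical section.
 */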
static void rcu_read_unlock_special(struct task_struct *t)
{
        int empty;
        int empty_exp;
        unsigned long flags;
        struct list_head *np;
        int special;

        /*
         * NMI handlers cannot block and cannot safely manipulate state.
         * They therefore cannot possibly be special, so just leave.
         */
        if (in_nmi())
                return;

        local_irq_save(flags);

        /*
         * If RCU core is waiting for this CPU to exit its critical section,
         * report that we have done so.
         */
        special = t->rcu_read_unlock_special;
        if (special & RCU_READ_UNLOCK_NEED_QS)
                rcu_preempt_cpu_qs();

        /* Hardware IRQ handlers cannot block. */
        if (in_irq()) {
                local_irq_restore(flags);
                return;
        }

        /* Clean up if blocked during RCU read-side critical section. */
        if (special & RCU_READ_UNLOCK_BLOCKED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BLOCKED;

                /*
                 * Remove this task from the ->blkd_tasks list and adjust
                 * any pointers that might have been referencing it.
                 */
                empty = !rcu_preempt_blocked_readers_cgp();
                empty_exp = rcu_preempt_ctrlblk.exp_tasks == NULL;
                np = rcu_next_node_entry(t);
                list_del(&t->rcu_node_entry);
                if (&t->rcu_node_entry == rcu_preempt_ctrlblk.gp_tasks)
                        rcu_preempt_ctrlblk.gp_tasks = np;
                if (&t->rcu_node_entry == rcu_preempt_ctrlblk.exp_tasks)
                        rcu_preempt_ctrlblk.exp_tasks = np;
#ifdef CONFIG_RCU_BOOST
                if (&t->rcu_node_entry == rcu_preempt_ctrlblk.boost_tasks)
                        rcu_preempt_ctrlblk.boost_tasks = np;
#endif
                INIT_LIST_HEAD(&t->rcu_node_entry);

                /*
                 * If this was the last task blocking the current grace
                 * period, report the quiescent state and start a new
                 * grace period if needed.
                 */
                if (!empty && !rcu_preempt_blocked_readers_cgp()) {
                        rcu_preempt_cpu_qs();
                        rcu_preempt_start_gp();
                }

                /*
                 * If this was the last task blocking the expedited grace
                 * period, wake up the waiting task.
                 */
                if (!empty_exp && rcu_preempt_ctrlblk.exp_tasks == NULL)
                        rcu_report_exp_done();
        }
#ifdef CONFIG_RCU_BOOST
        /* Unboost self if we were boosted. */
        if (special & RCU_READ_UNLOCK_BOOSTED) {
                t->rcu_read_unlock_special &= ~RCU_READ_UNLOCK_BOOSTED;
                rt_mutex_unlock(t->rcu_boost_mutex);
                t->rcu_boost_mutex = NULL;
        }
#endif
        local_irq_restore(flags);
}
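/*
 * Tiny-preemptible RCU implementation for rcu_read_unlock().
 * Decrement ->rcu_read_lock_nesting, and if the result is zero
 * (outermost rcu_read_unlock()) and ->rcu_read_unlock_special is
 * non-zero, invoke rcu_read_unlock_special() to clean up.
 */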
void __rcu_read_unlock(void)
{
        struct task_struct *t = current;

        barrier();  /* needed if we ever invoke rcu_read_unlock in rcutiny.c */
        --t->rcu_read_lock_nesting;
        barrier();  /* decrement before load of ->rcu_read_unlock_special */
        if (t->rcu_read_lock_nesting == 0 &&
            unlikely(ACCESS_ONCE(t->rcu_read_unlock_special)))
                rcu_read_unlock_special(t);
#ifdef CONFIG_PROVE_LOCKING
        WARN_ON_ONCE(t->rcu_read_lock_nesting < 0);
#endif
}
EXPORT_SYMBOL_GPL(__rcu_read_unlock);
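/*
 * Check for a quiescent state from the current CPU.  Tasks that have
 * blocked are recorded on ->blkd_tasks and checked elsewhere.  Caller
 * must have hard irqs disabled.  Wakes the RCU kthread if there are
 * callbacks ready to invoke, and asks a running reader that is blocking
 * the current grace period to report a quiescent state at its next
 * rcu_read_unlock().
 */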
static void rcu_preempt_check_callbacks(void)
{
        struct task_struct *t = current;

        if (rcu_preempt_gp_in_progress() &&
            (!rcu_preempt_running_reader() ||
             !rcu_cpu_blocking_cur_gp()))
                rcu_preempt_cpu_qs();
        if (&rcu_preempt_ctrlblk.rcb.rcucblist !=
            rcu_preempt_ctrlblk.rcb.donetail)
                invoke_rcu_kthread();
        if (rcu_preempt_gp_in_progress() &&
            rcu_cpu_blocking_cur_gp() &&
            rcu_preempt_running_reader())
                t->rcu_read_unlock_special |= RCU_READ_UNLOCK_NEED_QS;
}
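/*
 * TINY_PREEMPT_RCU has an extra callback-list tail pointer (->nexttail)
 * for callbacks that must wait for a later grace period.  When the
 * corresponding segment of the list is handed off, reset ->nexttail so
 * that it no longer references the donated callbacks.
 */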
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
        if (rcu_preempt_ctrlblk.nexttail == rcp->donetail)
                rcu_preempt_ctrlblk.nexttail = &rcp->rcucblist;
}

/*
 * Process callbacks for preemptible RCU.
 */
static void rcu_preempt_process_callbacks(void)
{
        rcu_process_callbacks(&rcu_preempt_ctrlblk.rcb);
}
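/*
 * Queue a preemptible-RCU callback for invocation after a grace period.
 */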
void call_rcu(struct rcu_head *head, void (*func)(struct rcu_head *rcu))
{
        unsigned long flags;

        debug_rcu_head_queue(head);
        head->func = func;
        head->next = NULL;

        local_irq_save(flags);
        *rcu_preempt_ctrlblk.nexttail = head;
        rcu_preempt_ctrlblk.nexttail = &head->next;
        RCU_TRACE(rcu_preempt_ctrlblk.rcb.qlen++);
        rcu_preempt_start_gp();  /* If a grace period is not already in flight. */
        local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(call_rcu);

/*
 * Wait for all previously queued RCU callbacks to be invoked: queue a
 * callback of our own and wait for it.
 */
void rcu_barrier(void)
{
        struct rcu_synchronize rcu;

        init_rcu_head_on_stack(&rcu.head);
        init_completion(&rcu.completion);
        /* Will wake me after RCU finished. */
        call_rcu(&rcu.head, wakeme_after_rcu);
        /* Wait for it. */
        wait_for_completion(&rcu.completion);
        destroy_rcu_head_on_stack(&rcu.head);
}
EXPORT_SYMBOL_GPL(rcu_barrier);
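/*
 * Wait for a grace period to elapse.  It is illegal to invoke
 * synchronize_rcu() from within an RCU read-side critical section, so
 * any legal caller is itself in a quiescent state; on this uniprocessor
 * implementation we therefore only need to wait if there are blocked
 * readers, in which case this degenerates to rcu_barrier().
 */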
void synchronize_rcu(void)
{
#ifdef CONFIG_DEBUG_LOCK_ALLOC
        if (!rcu_scheduler_active)
                return;
#endif

        WARN_ON_ONCE(rcu_preempt_running_reader());
        if (!rcu_preempt_blocked_readers_any())
                return;

        /* Once we get past the fastpath checks, same code as rcu_barrier(). */
        rcu_barrier();
}
EXPORT_SYMBOL_GPL(synchronize_rcu);

static DECLARE_WAIT_QUEUE_HEAD(sync_rcu_preempt_exp_wq);
static unsigned long sync_rcu_preempt_exp_count;
static DEFINE_MUTEX(sync_rcu_preempt_exp_mutex);

/*
 * Return non-zero if there are any tasks in RCU read-side critical
 * sections blocking the current expedited grace period.
 */
static int rcu_preempted_readers_exp(void)
{
        return rcu_preempt_ctrlblk.exp_tasks != NULL;
}

/*
 * Report the exit from RCU read-side critical section for the last task
 * that queued itself during or before the current expedited grace period.
 */
static void rcu_report_exp_done(void)
{
        wake_up(&sync_rcu_preempt_exp_wq);
}
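/*
 * Wait for an rcu-preempt grace period, but expedite it.  Because there
 * is only one CPU and this function cannot legally be called from within
 * an RCU read-side critical section, the only tasks that can block the
 * grace period are those already queued on ->blkd_tasks.  Snapshot the
 * head of that list into ->exp_tasks, optionally priority-boost the
 * blocked readers, and wait for the snapshotted tasks to drain.
 */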
void synchronize_rcu_expedited(void)
{
        unsigned long flags;
        struct rcu_preempt_ctrlblk *rpcp = &rcu_preempt_ctrlblk;
        unsigned long snap;

        barrier(); /* ensure prior action seen before grace period. */

        WARN_ON_ONCE(rcu_preempt_running_reader());

        /*
         * Acquire lock so that there is only one preemptible RCU grace
         * period in flight.  Of course, if someone does the expedited
         * grace period for us while we are acquiring the lock, just leave.
         */
        snap = sync_rcu_preempt_exp_count + 1;
        mutex_lock(&sync_rcu_preempt_exp_mutex);
        if (ULONG_CMP_LT(snap, sync_rcu_preempt_exp_count))
                goto unlock_mb_ret; /* Others did our work for us. */

        local_irq_save(flags);

        /*
         * All RCU readers have to already be on ->blkd_tasks because
         * we cannot legally be executing in an RCU read-side critical
         * section.  Snapshot the current head of the list.
         */
        rpcp->exp_tasks = rpcp->blkd_tasks.next;
        if (rpcp->exp_tasks == &rpcp->blkd_tasks)
                rpcp->exp_tasks = NULL;
        local_irq_restore(flags);

        /* Wait for the snapshotted ->blkd_tasks entries to drain. */
        if (rcu_preempted_readers_exp())
                rcu_initiate_expedited_boost();
        wait_event(sync_rcu_preempt_exp_wq,
                   !rcu_preempted_readers_exp());

        /* Clean up and exit. */
        barrier(); /* ensure expedited GP seen before counter increment. */
        sync_rcu_preempt_exp_count++;
unlock_mb_ret:
        mutex_unlock(&sync_rcu_preempt_exp_mutex);
        barrier(); /* ensure subsequent action seen after grace period. */
}
EXPORT_SYMBOL_GPL(synchronize_rcu_expedited);

/*
 * Does preemptible RCU need the CPU to stay out of dynticks mode?
 */
int rcu_preempt_needs_cpu(void)
{
        if (!rcu_preempt_running_reader())
                rcu_preempt_cpu_qs();
        return rcu_preempt_ctrlblk.rcb.rcucblist != NULL;
}

/*
 * Check for a task exiting while in a preemptible-RCU read-side
 * critical section, and clean up if so.
 */
void exit_rcu(void)
{
        struct task_struct *t = current;

        if (t->rcu_read_lock_nesting == 0)
                return;
        t->rcu_read_lock_nesting = 1;
        rcu_read_unlock();
}

#else /* #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_RCU_TRACE

/*
 * Because preemptible RCU does not exist, it is not necessary to
 * dump out its statistics.
 */
static void show_tiny_preempt_stats(struct seq_file *m)
{
}

#endif /* #ifdef CONFIG_RCU_TRACE */

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to boost.
 */
static int rcu_boost(void)
{
        return 0;
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to check.
 */
static void rcu_preempt_check_callbacks(void)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to remove.
 */
static void rcu_preempt_remove_callbacks(struct rcu_ctrlblk *rcp)
{
}

/*
 * Because preemptible RCU does not exist, it never has any callbacks
 * to process.
 */
static void rcu_preempt_process_callbacks(void)
{
}

#endif /* #else #ifdef CONFIG_TINY_PREEMPT_RCU */

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.
 */
void __init rcu_scheduler_starting(void)
{
        WARN_ON(nr_context_switches() > 0);
        rcu_scheduler_active = 1;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_BOOST
#define RCU_BOOST_PRIO CONFIG_RCU_BOOST_PRIO
#else
#define RCU_BOOST_PRIO 1
#endif

#ifdef CONFIG_RCU_TRACE

#ifdef CONFIG_RCU_BOOST

static void rcu_initiate_boost_trace(void)
{
        if (rcu_preempt_ctrlblk.gp_tasks == NULL)
                rcu_preempt_ctrlblk.n_normal_balk_gp_tasks++;
        else if (rcu_preempt_ctrlblk.boost_tasks != NULL)
                rcu_preempt_ctrlblk.n_normal_balk_boost_tasks++;
        else if (rcu_preempt_ctrlblk.boosted_this_gp != 0)
                rcu_preempt_ctrlblk.n_normal_balk_boosted++;
        else if (!ULONG_CMP_GE(jiffies, rcu_preempt_ctrlblk.boost_time))
                rcu_preempt_ctrlblk.n_normal_balk_notyet++;
        else
                rcu_preempt_ctrlblk.n_normal_balk_nos++;
}

static void rcu_initiate_exp_boost_trace(void)
{
        if (list_empty(&rcu_preempt_ctrlblk.blkd_tasks))
                rcu_preempt_ctrlblk.n_exp_balk_blkd_tasks++;
        else
                rcu_preempt_ctrlblk.n_exp_balk_nos++;
}

#endif /* #ifdef CONFIG_RCU_BOOST */

static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
        unsigned long flags;

        raw_local_irq_save(flags);
        rcp->qlen -= n;
        raw_local_irq_restore(flags);
}

/*
 * Dump statistics for TINY_RCU and TINY_PREEMPT_RCU.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
        show_tiny_preempt_stats(m);
        seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
        seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
        return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
        return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
        .owner = THIS_MODULE,
        .open = show_tiny_stats_open,
        .read = seq_read,
        .llseek = seq_lseek,
        .release = single_release,
};

static struct dentry *rcudir;

static int __init rcutiny_trace_init(void)
{
        struct dentry *retval;

        rcudir = debugfs_create_dir("rcu", NULL);
        if (!rcudir)
                goto free_out;
        retval = debugfs_create_file("rcudata", 0444, rcudir,
                                     NULL, &show_tiny_stats_fops);
        if (!retval)
                goto free_out;
        return 0;
free_out:
        debugfs_remove_recursive(rcudir);
        return 1;
}

static void __exit rcutiny_trace_cleanup(void)
{
        debugfs_remove_recursive(rcudir);
}

module_init(rcutiny_trace_init);
module_exit(rcutiny_trace_cleanup);

MODULE_AUTHOR("Paul E. McKenney");
MODULE_DESCRIPTION("Read-Copy Update tracing for tiny implementation");
MODULE_LICENSE("GPL");

#endif /* #ifdef CONFIG_RCU_TRACE */