#include <linux/kthread.h>
#include <linux/init.h>
#include <linux/debugfs.h>
#include <linux/seq_file.h>

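/* Global control variables for the rcupdate callback mechanism. */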
struct rcu_ctrlblk {
	struct rcu_head *rcucblist;	/* List of pending callbacks (CBs). */
	struct rcu_head **donetail;	/* ->next pointer of last "done" CB. */
	struct rcu_head **curtail;	/* ->next pointer of last CB. */
	RCU_TRACE(long qlen);		/* Number of pending CBs. */
	RCU_TRACE(unsigned long gp_start);	/* Start time for stalls. */
	RCU_TRACE(unsigned long ticks_this_gp);	/* Statistic for stalls. */
	RCU_TRACE(unsigned long jiffies_stall);	/* Jiffies at next stall. */
	RCU_TRACE(const char *name);	/* Name of RCU type. */
};

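/* Definitions of the rcupdate control blocks, one per RCU flavor. */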
static struct rcu_ctrlblk rcu_sched_ctrlblk = {
	.donetail	= &rcu_sched_ctrlblk.rcucblist,
	.curtail	= &rcu_sched_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_sched")
};

static struct rcu_ctrlblk rcu_bh_ctrlblk = {
	.donetail	= &rcu_bh_ctrlblk.rcucblist,
	.curtail	= &rcu_bh_ctrlblk.rcucblist,
	RCU_TRACE(.name = "rcu_bh")
};

#ifdef CONFIG_DEBUG_LOCK_ALLOC
#include <linux/kernel_stat.h>

int rcu_scheduler_active __read_mostly;
EXPORT_SYMBOL_GPL(rcu_scheduler_active);

/*
 * During boot, we forgive RCU lockdep issues.  After this function is
 * invoked, we start taking RCU lockdep issues seriously.  Tiny RCU
 * transitions directly from RCU_SCHEDULER_INACTIVE to
 * RCU_SCHEDULER_RUNNING because it has no kthreads, so it need not
 * care that the scheduler is only half-initialized early in boot.
 */
void __init rcu_scheduler_starting(void)
{
	WARN_ON(nr_context_switches() > 0);
	rcu_scheduler_active = RCU_SCHEDULER_RUNNING;
}

#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */

#ifdef CONFIG_RCU_TRACE

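/* Subtract n from the tracing-only callback-queue length, with irqs disabled. */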
static void rcu_trace_sub_qlen(struct rcu_ctrlblk *rcp, int n)
{
	unsigned long flags;

	local_irq_save(flags);
	rcp->qlen -= n;
	local_irq_restore(flags);
}

/*
 * Dump statistics for TINY_RCU, such as they are.
 */
static int show_tiny_stats(struct seq_file *m, void *unused)
{
	seq_printf(m, "rcu_sched: qlen: %ld\n", rcu_sched_ctrlblk.qlen);
	seq_printf(m, "rcu_bh: qlen: %ld\n", rcu_bh_ctrlblk.qlen);
	return 0;
}

static int show_tiny_stats_open(struct inode *inode, struct file *file)
{
	return single_open(file, show_tiny_stats, NULL);
}

static const struct file_operations show_tiny_stats_fops = {
	.owner		= THIS_MODULE,
	.open		= show_tiny_stats_open,
	.read		= seq_read,
	.llseek		= seq_lseek,
	.release	= single_release,
};

static struct dentry *rcudir;

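/* Create the debugfs "rcu" directory and its "rcudata" statistics file. */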
static int __init rcutiny_trace_init(void)
{
	struct dentry *retval;

	rcudir = debugfs_create_dir("rcu", NULL);
	if (!rcudir)
		goto free_out;
	retval = debugfs_create_file("rcudata", 0444, rcudir,
				     NULL, &show_tiny_stats_fops);
	if (!retval)
		goto free_out;
	return 0;
free_out:
	debugfs_remove_recursive(rcudir);
	return 1;
}
device_initcall(rcutiny_trace_init);

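/*
 * Complain if an RCU CPU stall is suspected: callbacks are pending and the
 * stall-warning timeout has elapsed.  Once the timeout has elapsed, push
 * the next warning time further into the future either way.
 */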
static void check_cpu_stall(struct rcu_ctrlblk *rcp)
{
	unsigned long j;
	unsigned long js;

	if (rcu_cpu_stall_suppress)
		return;
	rcp->ticks_this_gp++;
	j = jiffies;
	js = READ_ONCE(rcp->jiffies_stall);
	if (rcp->rcucblist && ULONG_CMP_GE(j, js)) {
		pr_err("INFO: %s stall on CPU (%lu ticks this GP) idle=%llx (t=%lu jiffies q=%ld)\n",
		       rcp->name, rcp->ticks_this_gp, DYNTICK_TASK_EXIT_IDLE,
		       jiffies - rcp->gp_start, rcp->qlen);
		dump_stack();
		WRITE_ONCE(rcp->jiffies_stall,
			   jiffies + 3 * rcu_jiffies_till_stall_check() + 3);
	} else if (ULONG_CMP_GE(j, js)) {
		WRITE_ONCE(rcp->jiffies_stall,
			   jiffies + rcu_jiffies_till_stall_check());
	}
}

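/* Record the start of a new grace period and re-arm the stall timeout. */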
static void reset_cpu_stall_ticks(struct rcu_ctrlblk *rcp)
{
	rcp->ticks_this_gp = 0;
	rcp->gp_start = jiffies;
	WRITE_ONCE(rcp->jiffies_stall,
		   jiffies + rcu_jiffies_till_stall_check());
}

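/* Check both the rcu_bh and rcu_sched flavors for stalls. */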
static void check_cpu_stalls(void)
{
	RCU_TRACE(check_cpu_stall(&rcu_bh_ctrlblk));
	RCU_TRACE(check_cpu_stall(&rcu_sched_ctrlblk));
}

#endif /* #ifdef CONFIG_RCU_TRACE */