// SPDX-License-Identifier: GPL-2.0
/*
 * latencytop.c: Latency display infrastructure
 *
 * (C) Copyright 2008 Intel Corporation
 * Author: Arjan van de Ven <arjan@linux.intel.com>
 */

/*
 * CONFIG_LATENCYTOP enables a kernel latency tracking infrastructure that is
 * used by the "latencytop" userspace tool. The latency that is tracked is not
 * the 'traditional' interrupt latency (which is primarily caused by something
 * else consuming CPU), but instead, it is the latency an application
 * encounters because the kernel sleeps on its behalf for various reasons.
 *
 * This code tracks 2 levels of statistics:
 * 1) System level latency
 * 2) Per process latency
 *
 * The latency is stored in fixed sized data structures in an accumulated form;
 * if the "same" latency cause is hit twice, this will be tracked as one entry
 * in the data structure. Both the count, total accumulated latency and maximum
 * latency are tracked in this data structure. When the fixed size structure is
 * full, no new causes are tracked until the buffer is flushed by writing to
 * the /proc file; the userspace tool does this on a regular basis.
 *
 * A latency cause is identified by a stringified backtrace at the point that
 * the scheduler gets invoked. The userland tool will use this string to
 * identify the cause of the latency in human readable form.
 *
 * The information is exported via /proc/latency_stats and /proc/<pid>/latency.
 * These files look like this:
 *
 * Latency Top version : v0.1
 * 70 59433 4897 i915_irq_wait drm_ioctl vfs_ioctl do_vfs_ioctl sys_ioctl
 * |    |    |    |
 * |    |    |    +----> the stringified backtrace
 * |    |    +---------> The maximum latency for this entry in microseconds
 * |    +--------------> The accumulated latency for this entry (microseconds)
 * +-------------------> The number of times this entry is hit
 *
 * (note: the average latency is the accumulated latency divided by the number
 * of times)
 */
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/notifier.h>
#include <linux/spinlock.h>
#include <linux/proc_fs.h>
#include <linux/latencytop.h>
#include <linux/export.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/stat.h>
#include <linux/list.h>
#include <linux/stacktrace.h>

static DEFINE_RAW_SPINLOCK(latency_lock);

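/*
 * System-wide table of accumulated latency records. A slot is keyed by its
 * backtrace; latency_lock protects both this table and the per-task records.
 */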
#define MAXLR 128
static struct latency_record latency_record[MAXLR];

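/*
 * Set via the kernel.latencytop sysctl; the scheduler only records
 * latencies while this is non-zero.
 */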
int latencytop_enabled;

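/*
 * Reset the per-task latency records of @p; both the record buffer and the
 * fill count are cleared under latency_lock.
 */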
void clear_tsk_latency_tracing(struct task_struct *p)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&p->latency_record, 0, sizeof(p->latency_record));
	p->latency_record_count = 0;
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

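/*
 * Wipe the system-wide latency table; done whenever userspace writes to
 * /proc/latency_stats (see lstats_write() below).
 */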
static void clear_global_latency_tracing(void)
{
	unsigned long flags;

	raw_spin_lock_irqsave(&latency_lock, flags);
	memset(&latency_record, 0, sizeof(latency_record));
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

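/*
 * Fold one latency record into the system-wide table: if an entry with the
 * same backtrace already exists, bump its count and accumulated/maximum
 * times; otherwise copy the record into the first free slot (if any).
 */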
static void __sched
account_global_scheduler_latency(struct task_struct *tsk,
				 struct latency_record *lat)
{
	int firstnonnull = MAXLR + 1;
	int i;

	/* skip kernel threads for now */
	if (!tsk->mm)
		return;

	for (i = 0; i < MAXLR; i++) {
		int q, same = 1;

		/* Nothing stored: */
		if (!latency_record[i].backtrace[0]) {
			if (firstnonnull > i)
				firstnonnull = i;
			continue;
		}
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat->backtrace[q];

			if (latency_record[i].backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry marks end of backtrace: */
			if (!record)
				break;
		}
		if (same) {
			latency_record[i].count++;
			latency_record[i].time += lat->time;
			if (lat->time > latency_record[i].max)
				latency_record[i].max = lat->time;
			return;
		}
	}

	i = firstnonnull;
	if (i >= MAXLR - 1)
		return;

	/* Allocated a new one: */
	memcpy(&latency_record[i], lat, sizeof(struct latency_record));
}

/**
 * __account_scheduler_latency - record an occurred latency
 * @tsk - the task struct of the task hitting the latency
 * @usecs - the duration of the latency in microseconds
 * @inter - 1 if the sleep was interruptible, 0 if uninterruptible
 *
 * This function is the main entry point for recording latency entries
 * as called by the scheduler.
 *
 * This function has a few special cases to deal with normal 'non-latency'
 * sleeps: specifically, interruptible sleep longer than 5 msec is skipped
 * (only busy waiting guys are latency interesting) and negative latencies
 * (caused by time going backwards) are also explicitly skipped.
 */
void __sched
__account_scheduler_latency(struct task_struct *tsk, int usecs, int inter)
{
	unsigned long flags;
	int i, q;
	struct latency_record lat;

	/* Interruptible sleeps longer than 5 msec are not interesting: */
	if (inter && usecs > 5000)
		return;

	/* Negative sleeps are time going backwards */
	/* Zero-time sleeps are non-interesting */
	if (usecs <= 0)
		return;

	memset(&lat, 0, sizeof(lat));
	lat.count = 1;
	lat.time = usecs;
	lat.max = usecs;

	stack_trace_save_tsk(tsk, lat.backtrace, LT_BACKTRACEDEPTH, 0);

	raw_spin_lock_irqsave(&latency_lock, flags);

	account_global_scheduler_latency(tsk, &lat);

	for (i = 0; i < tsk->latency_record_count; i++) {
		struct latency_record *mylat;
		int same = 1;

		mylat = &tsk->latency_record[i];
		for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
			unsigned long record = lat.backtrace[q];

			if (mylat->backtrace[q] != record) {
				same = 0;
				break;
			}

			/* 0 entry is end of backtrace */
			if (!record)
				break;
		}
		if (same) {
			mylat->count++;
			mylat->time += lat.time;
			if (lat.time > mylat->max)
				mylat->max = lat.time;
			goto out_unlock;
		}
	}

	/*
	 * short term hack; if we're > 32 we stop; future we recycle:
	 */
	if (tsk->latency_record_count >= LT_SAVECOUNT)
		goto out_unlock;

	/* Allocated a new one: */
	i = tsk->latency_record_count++;
	memcpy(&tsk->latency_record[i], &lat, sizeof(struct latency_record));

out_unlock:
	raw_spin_unlock_irqrestore(&latency_lock, flags);
}

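/*
 * seq_file show routine for /proc/latency_stats: one line per recorded
 * backtrace, printing hit count, accumulated time and maximum time (in
 * microseconds) followed by the symbolized call chain.
 */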
static int lstats_show(struct seq_file *m, void *v)
{
	int i;

	seq_puts(m, "Latency Top version : v0.1\n");

	for (i = 0; i < MAXLR; i++) {
		struct latency_record *lr = &latency_record[i];

		if (lr->backtrace[0]) {
			int q;
			seq_printf(m, "%i %lu %lu",
				   lr->count, lr->time, lr->max);
			for (q = 0; q < LT_BACKTRACEDEPTH; q++) {
				unsigned long bt = lr->backtrace[q];

				if (!bt)
					break;

				seq_printf(m, " %ps", (void *)bt);
			}
			seq_puts(m, "\n");
		}
	}
	return 0;
}

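/* Any write to /proc/latency_stats clears the global table. */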
static ssize_t
lstats_write(struct file *file, const char __user *buf, size_t count,
	     loff_t *offs)
{
	clear_global_latency_tracing();

	return count;
}

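/* The whole table is emitted in one pass, so single_open() is sufficient. */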
static int lstats_open(struct inode *inode, struct file *filp)
{
	return single_open(filp, lstats_show, NULL);
}

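/* /proc/latency_stats: reads dump the table, writes clear it. */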
static const struct proc_ops lstats_proc_ops = {
	.proc_open	= lstats_open,
	.proc_read	= seq_read,
	.proc_write	= lstats_write,
	.proc_lseek	= seq_lseek,
	.proc_release	= single_release,
};

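/* Create /proc/latency_stats; hooked in via device_initcall() below. */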
static int __init init_lstats_procfs(void)
{
	proc_create("latency_stats", 0644, NULL, &lstats_proc_ops);
	return 0;
}

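/*
 * Handler for the kernel.latencytop sysctl: latency accounting in the
 * scheduler relies on schedstats, so force-enable them whenever
 * latencytop gets switched on.
 */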
int sysctl_latencytop(struct ctl_table *table, int write, void *buffer,
		size_t *lenp, loff_t *ppos)
{
	int err;

	err = proc_dointvec(table, write, buffer, lenp, ppos);
	if (latencytop_enabled)
		force_schedstat_enabled();

	return err;
}
device_initcall(init_lstats_procfs);