// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2008 Steven Rostedt <srostedt@redhat.com>
 */
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/security.h>
#include <linux/kallsyms.h>
#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/ftrace.h>
#include <linux/module.h>
#include <linux/sysctl.h>
#include <linux/init.h>

#include <asm/setup.h>

#include "trace.h"

#define STACK_TRACE_ENTRIES 500

static unsigned long stack_dump_trace[STACK_TRACE_ENTRIES];
static unsigned stack_trace_index[STACK_TRACE_ENTRIES];

static unsigned int stack_trace_nr_entries;
static unsigned long stack_trace_max_size;
static arch_spinlock_t stack_trace_max_lock =
	(arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;

DEFINE_PER_CPU(int, disable_stack_tracer);
static DEFINE_MUTEX(stack_sysctl_mutex);

int stack_tracer_enabled;

static void print_max_stack(void)
{
	long i;
	int size;

	pr_emerg("        Depth    Size   Location    (%d entries)\n"
		 "        -----    ----   --------\n",
		 stack_trace_nr_entries);

	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (i + 1 == stack_trace_nr_entries)
			size = stack_trace_index[i];
		else
			size = stack_trace_index[i] - stack_trace_index[i+1];

		pr_emerg("%3ld) %8d   %5d   %pS\n", i, stack_trace_index[i],
			 size, (void *)stack_dump_trace[i]);
	}
}
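
/*
 * The stack tracer watches for a new maximum stack depth.  check_stack()
 * is called from the ftrace callback below with the address of a local
 * variable of that callback; the distance from that address to the top of
 * the current thread stack is the stack usage at this point.
 *
 * When a new maximum is seen, the full stack trace is saved and the stack
 * itself is walked to find where each return address from the trace is
 * stored.  From those locations the per-frame sizes recorded in
 * stack_trace_index[] are computed, so the "stack_trace" file can show how
 * much stack each function in the deepest call chain used.  The frame
 * added by the tracer itself is measured once (tracer_frame) and
 * subtracted from the recorded maximum.
 */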
static void check_stack(unsigned long ip, unsigned long *stack)
{
	unsigned long this_size, flags; unsigned long *p, *top, *start;
	static int tracer_frame;
	int frame_size = READ_ONCE(tracer_frame);
	int i, x;

	this_size = ((unsigned long)stack) & (THREAD_SIZE-1);
	this_size = THREAD_SIZE - this_size;
	/* Remove the frame of the tracer */
	this_size -= frame_size;

	if (this_size <= stack_trace_max_size)
		return;

	/* we do not handle interrupt stacks yet */
	if (!object_is_on_stack(stack))
		return;

	/* Can't do this from NMI context (can cause deadlocks) */
	if (in_nmi())
		return;

	local_irq_save(flags);
	arch_spin_lock(&stack_trace_max_lock);

	/* In case another CPU set the tracer_frame on us */
	if (unlikely(!frame_size))
		this_size -= tracer_frame;

	/* a race could have already updated it */
	if (this_size <= stack_trace_max_size)
		goto out;

	stack_trace_max_size = this_size;

	stack_trace_nr_entries = stack_trace_save(stack_dump_trace,
					       ARRAY_SIZE(stack_dump_trace) - 1,
					       0);

	/* Skip over the overhead of the stack tracer itself */
	for (i = 0; i < stack_trace_nr_entries; i++) {
		if (stack_dump_trace[i] == ip)
			break;
	}

	/*
	 * Some archs may not have the passed in ip in the dump.
	 * If that happens, we need to show everything.
	 */
	if (i == stack_trace_nr_entries)
		i = 0;

	/*
	 * Now find where in the stack these are.
	 */
	x = 0;
	start = stack;
	top = (unsigned long *)
		(((unsigned long)start & ~(THREAD_SIZE-1)) + THREAD_SIZE);

	/*
	 * Loop through all the entries. One of the entries may
	 * for some reason be missed on the stack, so we may
	 * have to account for them. If they are all there, this
	 * loop will only happen once. This code only takes place
	 * on a new max, so it is far from a fast path.
	 */
	while (i < stack_trace_nr_entries) {
		int found = 0;

		stack_trace_index[x] = this_size;
		p = start;

		for (; p < top && i < stack_trace_nr_entries; p++) {
			/*
			 * The READ_ONCE_NOCHECK is used to let KASAN know that
			 * this is not a stack-out-of-bounds error.
			 */
			if ((READ_ONCE_NOCHECK(*p)) == stack_dump_trace[i]) {
				stack_dump_trace[x] = stack_dump_trace[i++];
				this_size = stack_trace_index[x++] =
					(top - p) * sizeof(unsigned long);
				found = 1;
				/* Start the search from here */
				start = p + 1;
				/*
				 * We do not want to show the overhead
				 * of the stack tracer stack in the
				 * max stack. If we haven't figured
				 * out what that is, then figure it out
				 * now.
				 */
				if (unlikely(!tracer_frame)) {
					tracer_frame = (p - stack) *
						sizeof(unsigned long);
					stack_trace_max_size -= tracer_frame;
				}
			}
		}

		if (!found)
			i++;
	}

#ifdef ARCH_FTRACE_SHIFT_STACK_TRACER
	/*
	 * Some archs will store the link register before calling
	 * nested functions. This means the saved return address
	 * comes after the local storage, and we need to shift
	 * for that.
	 */
	if (x > 1) {
		memmove(&stack_trace_index[0], &stack_trace_index[1],
			sizeof(stack_trace_index[0]) * (x - 1));
		x--;
	}
#endif

	stack_trace_nr_entries = x;

	if (task_stack_end_corrupted(current)) {
		print_max_stack();
		BUG();
	}

 out:
	arch_spin_unlock(&stack_trace_max_lock);
	local_irq_restore(flags);
}

/* Some archs may not define MCOUNT_INSN_SIZE */
#ifndef MCOUNT_INSN_SIZE
# define MCOUNT_INSN_SIZE 0
#endif

static void
stack_trace_call(unsigned long ip, unsigned long parent_ip,
		 struct ftrace_ops *op, struct ftrace_regs *fregs)
{
	unsigned long stack;

	preempt_disable_notrace();

	/* no atomic needed, we only modify this variable by this cpu */
	__this_cpu_inc(disable_stack_tracer);
	if (__this_cpu_read(disable_stack_tracer) != 1)
		goto out;

	/* If rcu is not watching, then save stack trace can fail */
	if (!rcu_is_watching())
		goto out;

	ip += MCOUNT_INSN_SIZE;

	check_stack(ip, &stack);

 out:
	__this_cpu_dec(disable_stack_tracer);
	/* prevent recursion in schedule */
	preempt_enable_notrace();
}

static struct ftrace_ops trace_ops __read_mostly =
{
	.func = stack_trace_call,
};

static ssize_t
stack_max_size_read(struct file *filp, char __user *ubuf,
		    size_t count, loff_t *ppos)
{
	unsigned long *ptr = filp->private_data;
	char buf[64];
	int r;

	r = snprintf(buf, sizeof(buf), "%ld\n", *ptr);
	if (r > sizeof(buf))
		r = sizeof(buf);
	return simple_read_from_buffer(ubuf, count, ppos, buf, r);
}

static ssize_t
stack_max_size_write(struct file *filp, const char __user *ubuf,
		     size_t count, loff_t *ppos)
{
	long *ptr = filp->private_data;
	unsigned long val, flags;
	int ret;

	ret = kstrtoul_from_user(ubuf, count, 10, &val);
	if (ret)
		return ret;

	local_irq_save(flags);

	/*
	 * Disable the stack tracer on this CPU so its callback cannot
	 * recurse into check_stack() and deadlock on the lock below.
	 */
	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);
	*ptr = val;
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);
	local_irq_restore(flags);

	return count;
}

static const struct file_operations stack_max_size_fops = {
	.open = tracing_open_generic,
	.read = stack_max_size_read,
	.write = stack_max_size_write,
	.llseek = default_llseek,
};

static void *
__next(struct seq_file *m, loff_t *pos)
{
	long n = *pos - 1;

	if (n >= stack_trace_nr_entries)
		return NULL;

	m->private = (void *)n;
	return &m->private;
}

static void *
t_next(struct seq_file *m, void *v, loff_t *pos)
{
	(*pos)++;
	return __next(m, pos);
}

static void *t_start(struct seq_file *m, loff_t *pos)
{
	local_irq_disable();

	__this_cpu_inc(disable_stack_tracer);

	arch_spin_lock(&stack_trace_max_lock);

	if (*pos == 0)
		return SEQ_START_TOKEN;

	return __next(m, pos);
}

static void t_stop(struct seq_file *m, void *p)
{
	arch_spin_unlock(&stack_trace_max_lock);

	__this_cpu_dec(disable_stack_tracer);

	local_irq_enable();
}

static void trace_lookup_stack(struct seq_file *m, long i)
{
	unsigned long addr = stack_dump_trace[i];

	seq_printf(m, "%pS\n", (void *)addr);
}

static void print_disabled(struct seq_file *m)
{
	seq_puts(m, "#\n"
		 "#  Stack tracer disabled\n"
		 "#\n"
		 "# To enable the stack tracer, either add 'stacktrace' to the\n"
		 "# kernel command line\n"
		 "# or 'echo 1 > /proc/sys/kernel/stack_tracer_enabled'\n"
		 "#\n");
}

static int t_show(struct seq_file *m, void *v)
{
	long i;
	int size;

	if (v == SEQ_START_TOKEN) {
		seq_printf(m, "        Depth    Size   Location"
			   "    (%d entries)\n"
			   "        -----    ----   --------\n",
			   stack_trace_nr_entries);

		if (!stack_tracer_enabled && !stack_trace_max_size)
			print_disabled(m);

		return 0;
	}

	i = *(long *)v;

	if (i >= stack_trace_nr_entries)
		return 0;

	if (i + 1 == stack_trace_nr_entries)
		size = stack_trace_index[i];
	else
		size = stack_trace_index[i] - stack_trace_index[i+1];

	seq_printf(m, "%3ld) %8d   %5d   ", i, stack_trace_index[i], size);

	trace_lookup_stack(m, i);

	return 0;
}

static const struct seq_operations stack_trace_seq_ops = {
	.start = t_start,
	.next = t_next,
	.stop = t_stop,
	.show = t_show,
};

static int stack_trace_open(struct inode *inode, struct file *file)
{
	int ret;

	ret = security_locked_down(LOCKDOWN_TRACEFS);
	if (ret)
		return ret;

	return seq_open(file, &stack_trace_seq_ops);
}

static const struct file_operations stack_trace_fops = {
	.open = stack_trace_open,
	.read = seq_read,
	.llseek = seq_lseek,
	.release = seq_release,
};

#ifdef CONFIG_DYNAMIC_FTRACE

static int
stack_trace_filter_open(struct inode *inode, struct file *file)
{
	struct ftrace_ops *ops = inode->i_private;

	/* Checks for tracefs lockdown */
	return ftrace_regex_open(ops, FTRACE_ITER_FILTER,
				 inode, file);
}

static const struct file_operations stack_trace_filter_fops = {
	.open = stack_trace_filter_open,
	.read = seq_read,
	.write = ftrace_filter_write,
	.llseek = tracing_lseek,
	.release = ftrace_regex_release,
};

#endif /* CONFIG_DYNAMIC_FTRACE */

int
stack_trace_sysctl(struct ctl_table *table, int write, void *buffer,
		   size_t *lenp, loff_t *ppos)
{
	int was_enabled;
	int ret;

	mutex_lock(&stack_sysctl_mutex);
	was_enabled = !!stack_tracer_enabled;

	ret = proc_dointvec(table, write, buffer, lenp, ppos);

	if (ret || !write || (was_enabled == !!stack_tracer_enabled))
		goto out;

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);
	else
		unregister_ftrace_function(&trace_ops);
 out:
	mutex_unlock(&stack_sysctl_mutex);
	return ret;
}

static char stack_trace_filter_buf[COMMAND_LINE_SIZE+1] __initdata;

static __init int enable_stacktrace(char *str)
{
	int len;

	if ((len = str_has_prefix(str, "_filter=")))
		strncpy(stack_trace_filter_buf, str + len, COMMAND_LINE_SIZE);

	stack_tracer_enabled = 1;
	return 1;
}
__setup("stacktrace", enable_stacktrace);

static __init int stack_trace_init(void)
{
	int ret;

	ret = tracing_init_dentry();
	if (ret)
		return 0;

	trace_create_file("stack_max_size", 0644, NULL,
			&stack_trace_max_size, &stack_max_size_fops);

	trace_create_file("stack_trace", 0444, NULL,
			NULL, &stack_trace_fops);

#ifdef CONFIG_DYNAMIC_FTRACE
	trace_create_file("stack_trace_filter", 0644, NULL,
			  &trace_ops, &stack_trace_filter_fops);
#endif

	if (stack_trace_filter_buf[0])
		ftrace_set_early_filter(&trace_ops, stack_trace_filter_buf, 1);

	if (stack_tracer_enabled)
		register_ftrace_function(&trace_ops);

	return 0;
}

device_initcall(stack_trace_init);