#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <linux/ftrace.h>

#ifdef CONFIG_FRAME_POINTER

struct stackframe {
	unsigned long fp;
	unsigned long ra;
};
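/*
 * With CONFIG_FRAME_POINTER the compiler keeps a frame record just below
 * each frame pointer: the saved return address at fp - sizeof(long) and
 * the caller's fp below that.  The walker reads that record as a
 * struct stackframe ending at fp, i.e. (struct stackframe *)fp - 1.
 */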

static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long fp, sp, pc;

	if (regs) {
		fp = GET_FP(regs);
		sp = GET_USP(regs);
		pc = GET_IP(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		fp = (unsigned long)__builtin_frame_address(0);
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		fp = task->thread.s[0];
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	for (;;) {
		unsigned long low, high;
		struct stackframe *frame;

		if (unlikely(!__kernel_text_address(pc) || fn(pc, arg)))
			break;

		/* Validate frame pointer */
		low = sp + sizeof(struct stackframe);
		high = ALIGN(sp, THREAD_SIZE);
		if (unlikely(fp < low || fp > high || fp & 0x7))
			break;
		/* Unwind stack frame */
		frame = (struct stackframe *)fp - 1;
		sp = fp;
		fp = frame->fp;
#ifdef HAVE_FUNCTION_GRAPH_RET_ADDR_PTR
		pc = ftrace_graph_ret_addr(current, NULL, frame->ra,
					   (unsigned long *)(fp - 8));
#else
		pc = frame->ra - 0x4;
#endif
	}
}

#else /* !CONFIG_FRAME_POINTER */

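/*
 * Without frame pointers there is no reliable way to unwind, so scan the
 * kernel stack word by word and report anything that looks like a kernel
 * text address.  This is a heuristic and may include stale or spurious
 * entries.
 */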
static void notrace walk_stackframe(struct task_struct *task,
	struct pt_regs *regs, bool (*fn)(unsigned long, void *), void *arg)
{
	unsigned long sp, pc;
	unsigned long *ksp;

	if (regs) {
		sp = GET_USP(regs);
		pc = GET_IP(regs);
	} else if (task == NULL || task == current) {
		const register unsigned long current_sp __asm__ ("sp");
		sp = current_sp;
		pc = (unsigned long)walk_stackframe;
	} else {
		/* task blocked in __switch_to */
		sp = task->thread.sp;
		pc = task->thread.ra;
	}

	if (unlikely(sp & 0x7))
		return;

	ksp = (unsigned long *)sp;
	while (!kstack_end(ksp)) {
		if (__kernel_text_address(pc) && unlikely(fn(pc, arg)))
			break;
		pc = (*ksp++) - 0x4;
	}
}

#endif /* CONFIG_FRAME_POINTER */

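/*
 * The callbacks below follow walk_stackframe()'s contract: fn() is called
 * once per frame with that frame's return address, and returns true to
 * stop the walk or false to continue.  A (hypothetical) callback that
 * records only the innermost frame would look like:
 *
 *	static bool save_first(unsigned long pc, void *arg)
 *	{
 *		*(unsigned long *)arg = pc;
 *		return true;
 *	}
 */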
static bool print_trace_address(unsigned long pc, void *arg)
{
	print_ip_sym(pc);
	return false;
}
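/*
 * show_stack() is the arch hook used by generic code such as dump_stack()
 * to print a call trace; it emits one line per return address via
 * print_trace_address() above.
 */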

void show_stack(struct task_struct *task, unsigned long *sp)
{
	pr_cont("Call Trace:\n");
	walk_stackframe(task, NULL, print_trace_address, NULL);
}

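/*
 * get_wchan() reports the "wait channel" of a sleeping task: the first
 * return address in its call chain that is not a scheduler function.
 * save_wchan() is the walk_stackframe() callback that picks it out.
 */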
static bool save_wchan(unsigned long pc, void *arg)
{
	if (!in_sched_functions(pc)) {
		unsigned long *p = arg;
		*p = pc;
		return true;
	}
	return false;
}

unsigned long get_wchan(struct task_struct *task)
{
	unsigned long pc = 0;

	if (likely(task && task != current && task->state != TASK_RUNNING))
		walk_stackframe(task, NULL, save_wchan, &pc);
	return pc;
}


#ifdef CONFIG_STACKTRACE

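/*
 * Store one entry per frame into the stack_trace buffer: honour
 * trace->skip, optionally drop scheduler functions (nosched), and stop
 * the walk by returning true once the buffer is full.
 */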
static bool __save_trace(unsigned long pc, void *arg, bool nosched)
{
	struct stack_trace *trace = arg;

	if (unlikely(nosched && in_sched_functions(pc)))
		return false;
	if (unlikely(trace->skip > 0)) {
		trace->skip--;
		return false;
	}

	trace->entries[trace->nr_entries++] = pc;
	return (trace->nr_entries >= trace->max_entries);
}

static bool save_trace(unsigned long pc, void *arg)
{
	return __save_trace(pc, arg, false);
}
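
/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */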
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	walk_stackframe(tsk, NULL, save_trace, trace);
	if (trace->nr_entries < trace->max_entries)
		trace->entries[trace->nr_entries++] = ULONG_MAX;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

void save_stack_trace(struct stack_trace *trace)
{
	save_stack_trace_tsk(NULL, trace);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

#endif /* CONFIG_STACKTRACE */