// SPDX-License-Identifier: GPL-2.0

/*
 * Stack trace utility functions etc.
 *
 * Copyright 2008 Christoph Hellwig, IBM Corp.
 * Copyright 2018 SUSE Linux GmbH
 * Copyright 2018 Nick Piggin, Michael Ellerman, IBM Corp.
 */

#include <linux/delay.h>
#include <linux/export.h>
#include <linux/kallsyms.h>
#include <linux/module.h>
#include <linux/nmi.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/stacktrace.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <linux/ftrace.h>
#include <asm/kprobes.h>

#include <asm/paca.h>

/*
 * Save stack-backtrace addresses into a stack_trace buffer.
 */
static void save_context_stack(struct stack_trace *trace, unsigned long sp,
			struct task_struct *tsk, int savesched)
{
	for (;;) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long newsp, ip;

		if (!validate_sp(sp, tsk, STACK_FRAME_OVERHEAD))
			return;

		newsp = stack[0];
		ip = stack[STACK_FRAME_LR_SAVE];

		if (savesched || !in_sched_functions(ip)) {
			if (!trace->skip)
				trace->entries[trace->nr_entries++] = ip;
			else
				trace->skip--;
		}

		if (trace->nr_entries >= trace->max_entries)
			return;

		sp = newsp;
	}
}

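/*
 * Save a stack trace for the currently running task, starting from the
 * live stack pointer. Scheduler functions are included (savesched == 1).
 */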
void save_stack_trace(struct stack_trace *trace)
{
	unsigned long sp;

	sp = current_stack_pointer();

	save_context_stack(trace, sp, current, 1);
}
EXPORT_SYMBOL_GPL(save_stack_trace);

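/*
 * Save a stack trace for another task, walking from its last saved
 * stack pointer (thread.ksp). Frames inside scheduler functions are
 * skipped (savesched == 0).
 */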
void save_stack_trace_tsk(struct task_struct *tsk, struct stack_trace *trace)
{
	unsigned long sp;

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	save_context_stack(trace, sp, tsk, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk);

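/*
 * Save a stack trace starting from the stack pointer captured in a
 * register set (gpr[1] is the stack pointer on powerpc).
 */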
void
save_stack_trace_regs(struct pt_regs *regs, struct stack_trace *trace)
{
	save_context_stack(trace, regs->gpr[1], current, 0);
}
EXPORT_SYMBOL_GPL(save_stack_trace_regs);

#ifdef CONFIG_HAVE_RELIABLE_STACKTRACE

/*
 * This function returns an error if it detects any unreliable features of
 * the stack. Otherwise the stack is considered reliable and zero is
 * returned.
 */
int
save_stack_trace_tsk_reliable(struct task_struct *tsk,
			      struct stack_trace *trace)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(tsk);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(tsk)) {
		/*
		 * For user tasks, this is the SP value loaded on
		 * kernel entry, see "PACAKSAVE(r13)" in _switch() and
		 * system_call_common()/EXCEPTION_PROLOG_COMMON().
		 *
		 * Likewise for non-swapper kernel threads,
		 * this also happens to be the top of the stack
		 * as setup by copy_thread().
		 *
		 * Note that stack backlinks are not properly set up by
		 * copy_thread() and thus, a forked task will have an
		 * unreliable stack trace until it's been
		 * _switch()'ed to for the first time.
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * idle tasks have a custom stack layout,
		 * c.f. cpu_idle_thread_init().
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (tsk == current)
		sp = current_stack_pointer();
	else
		sp = tsk->thread.ksp;

	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* sanity check: ABI requires SP to be aligned 16 bytes. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* Stack grows downwards; unwinder may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL; /* invalid backlink, too far up. */
		}

		/*
		 * We can only trust the bottom frame's backlink, the
		 * rest of the frame may be uninitialized, continue to
		 * the next.
		 */
		if (firstframe)
			continue;

		/* Mark stacktraces with exception frames as unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* Examine the saved LR: it must point into kernel code. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * If the function graph tracer has hooked this frame's
		 * return address, recover the original one.
		 */
		ip = ftrace_graph_ret_addr(tsk, &graph_idx, ip, NULL);
#ifdef CONFIG_KPROBES
		/*
		 * Mark stacktraces with kretprobed functions on them
		 * as unreliable.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (trace->nr_entries >= trace->max_entries)
			return -E2BIG;
		if (!trace->skip)
			trace->entries[trace->nr_entries++] = ip;
		else
			trace->skip--;
	}
	return 0;
}
EXPORT_SYMBOL_GPL(save_stack_trace_tsk_reliable);
#endif

#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
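/*
 * NMI IPI handler: dump the interrupted CPU's stack via the generic
 * nmi_cpu_backtrace() helper.
 */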
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}

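/*
 * Ask each CPU in @mask to dump its stack. CPUs other than the current
 * one are sent an NMI IPI and given up to 5 seconds to respond; if a CPU
 * does not respond (or the IPI cannot be sent), fall back to printing
 * whatever its paca says about it.
 */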
static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			// IPI was sent, wait a deadline for the CPU to respond
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			// We either got a response, or timed out
			if (delay_us)
				continue;
		}

		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1);
	}
}

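/*
 * Arch hook used by the generic backtrace machinery in
 * lib/nmi_backtrace.c to trigger stack dumps on a set of CPUs.
 */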
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
#endif