1
2
3
4
5
6
7
8
9
10
11#include <linux/export.h>
12#include <linux/kallsyms.h>
13#include <linux/module.h>
14#include <linux/nmi.h>
15#include <linux/sched.h>
16#include <linux/sched/debug.h>
17#include <linux/sched/task_stack.h>
18#include <linux/stacktrace.h>
19#include <asm/ptrace.h>
20#include <asm/processor.h>
21#include <linux/ftrace.h>
22#include <asm/kprobes.h>
23
24#include <asm/paca.h>
25
26void __no_sanitize_address arch_stack_walk(stack_trace_consume_fn consume_entry, void *cookie,
27 struct task_struct *task, struct pt_regs *regs)
28{
29 unsigned long sp;
30
31 if (regs && !consume_entry(cookie, regs->nip))
32 return;
33
34 if (regs)
35 sp = regs->gpr[1];
36 else if (task == current)
37 sp = current_stack_frame();
38 else
39 sp = task->thread.ksp;
40
41 for (;;) {
42 unsigned long *stack = (unsigned long *) sp;
43 unsigned long newsp, ip;
44
45 if (!validate_sp(sp, task, STACK_FRAME_OVERHEAD))
46 return;
47
48 newsp = stack[0];
49 ip = stack[STACK_FRAME_LR_SAVE];
50
51 if (!consume_entry(cookie, ip))
52 return;
53
54 sp = newsp;
55 }
56}
57
58
59
60
61
62
63
/*
 * Walk @task's kernel stack and feed each return address to @consume_entry,
 * returning -EINVAL as soon as anything about the stack looks unreliable
 * (bad alignment, non-monotonic back chain, exception frame, unresolvable
 * or kretprobe-patched return address). Returns 0 only for a trace that
 * passed every check.
 */
int __no_sanitize_address arch_stack_walk_reliable(stack_trace_consume_fn consume_entry,
						   void *cookie, struct task_struct *task)
{
	unsigned long sp;
	unsigned long newsp;
	unsigned long stack_page = (unsigned long)task_stack_page(task);
	unsigned long stack_end;
	int graph_idx = 0;
	bool firstframe;

	stack_end = stack_page + THREAD_SIZE;
	if (!is_idle_task(task)) {
		/*
		 * Ordinary tasks: the top of the stack reserves room for a
		 * minimal frame plus a struct pt_regs (the register save
		 * area laid down on kernel entry — presumably by the entry
		 * code / copy_thread(); confirm against those paths).
		 */
		stack_end -= STACK_FRAME_OVERHEAD + sizeof(struct pt_regs);
	} else {
		/*
		 * Idle tasks reserve only the minimal stack frame at the
		 * top (no pt_regs area).
		 */
		stack_end -= STACK_FRAME_OVERHEAD;
	}

	if (task == current)
		sp = current_stack_frame();
	else
		sp = task->thread.ksp;

	/* Initial SP must lie inside the usable part of the stack page. */
	if (sp < stack_page + sizeof(struct thread_struct) ||
	    sp > stack_end - STACK_FRAME_MIN_SIZE) {
		return -EINVAL;
	}

	for (firstframe = true; sp != stack_end;
	     firstframe = false, sp = newsp) {
		unsigned long *stack = (unsigned long *) sp;
		unsigned long ip;

		/* Stack pointers must be 16-byte aligned. */
		if (sp & 0xF)
			return -EINVAL;

		newsp = stack[0];
		/* The stack grows downwards; the back chain may only go up. */
		if (newsp <= sp)
			return -EINVAL;

		/* Back chain may land exactly on stack_end, but no closer. */
		if (newsp != stack_end &&
		    newsp > stack_end - STACK_FRAME_MIN_SIZE) {
			return -EINVAL;
		}

		/*
		 * Skip consuming the bottom frame's LR save slot: only its
		 * back chain is trusted here — the slot may not have been
		 * written yet for the innermost frame (NOTE: rationale
		 * inferred from the skip; confirm against ABI/entry code).
		 */
		if (firstframe)
			continue;

		/* An exception frame on the stack makes the trace unreliable. */
		if (sp <= stack_end - STACK_INT_FRAME_SIZE &&
		    stack[STACK_FRAME_MARKER] == STACK_FRAME_REGS_MARKER) {
			return -EINVAL;
		}

		/* The saved LR must point into kernel text. */
		ip = stack[STACK_FRAME_LR_SAVE];
		if (!__kernel_text_address(ip))
			return -EINVAL;

		/*
		 * Translate a function_graph trampoline address back to the
		 * original return address before reporting it.
		 */
		ip = ftrace_graph_ret_addr(task, &graph_idx, ip, stack);
#ifdef CONFIG_KPROBES
		/*
		 * A return address replaced by the kretprobe trampoline
		 * cannot be resolved reliably — reject the trace.
		 */
		if (ip == (unsigned long)kretprobe_trampoline)
			return -EINVAL;
#endif

		if (!consume_entry(cookie, ip))
			return -EINVAL;
	}
	return 0;
}
166
167#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_NMI_IPI)
/* NMI IPI handler: dump a backtrace of the interrupted context on this CPU. */
static void handle_backtrace_ipi(struct pt_regs *regs)
{
	nmi_cpu_backtrace(regs);
}
172
/*
 * Send a backtrace NMI IPI to every CPU in @mask. A CPU that handles the
 * IPI clears itself from @mask; for a CPU that never responds, fall back
 * to inspecting its paca from here and dumping its saved stack, which may
 * be stale.
 */
static void raise_backtrace_ipi(cpumask_t *mask)
{
	struct paca_struct *p;
	unsigned int cpu;
	u64 delay_us;

	for_each_cpu(cpu, mask) {
		/* No IPI needed for ourselves — backtrace directly. */
		if (cpu == smp_processor_id()) {
			handle_backtrace_ipi(NULL);
			continue;
		}

		delay_us = 5 * USEC_PER_SEC;

		if (smp_send_safe_nmi_ipi(cpu, handle_backtrace_ipi, delay_us)) {
			/* Wait up to 5s for the other CPU to respond. */
			while (cpumask_test_cpu(cpu, mask) && delay_us) {
				udelay(1);
				delay_us--;
			}

			/* Time left means the CPU cleared itself from the mask. */
			if (delay_us)
				continue;
		}

		/* IPI failed or timed out: inspect the target's paca remotely. */
		p = paca_ptrs[cpu];

		cpumask_clear_cpu(cpu, mask);

		pr_warn("CPU %d didn't respond to backtrace IPI, inspecting paca.\n", cpu);
		if (!virt_addr_valid(p)) {
			pr_warn("paca pointer appears corrupt? (%px)\n", p);
			continue;
		}

		pr_warn("irq_soft_mask: 0x%02x in_mce: %d in_nmi: %d",
			p->irq_soft_mask, p->in_mce, p->in_nmi);

		/* __current may be garbage if the CPU is wedged — validate first. */
		if (virt_addr_valid(p->__current))
			pr_cont(" current: %d (%s)\n", p->__current->pid,
				p->__current->comm);
		else
			pr_cont(" current pointer corrupt? (%px)\n", p->__current);

		pr_warn("Back trace of paca->saved_r1 (0x%016llx) (possibly stale):\n", p->saved_r1);
		show_stack(p->__current, (unsigned long *)p->saved_r1, KERN_WARNING);
	}
}
222
/*
 * Arch hook for triggering backtraces on a set of CPUs; delegates to the
 * generic helper with our NMI-IPI based raise function.
 */
void arch_trigger_cpumask_backtrace(const cpumask_t *mask, bool exclude_self)
{
	nmi_trigger_cpumask_backtrace(mask, exclude_self, raise_backtrace_ipi);
}
227#endif
228