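/*
 * arch/sh/kernel/cpu/sh5/unwind.c
 *
 * Stack unwinder for sh64 (SH-5): walks kernel call chains by scanning
 * function prologues to recover each frame's saved frame pointer (r14)
 * and link register (r18).
 */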
#include <linux/kallsyms.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/errno.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
#include <asm/io.h>
#include <asm/unwinder.h>
#include <asm/stacktrace.h>

/* 'movi' immediates seen while scanning a prologue, indexed by destination register */
static u8 regcache[63];
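
/*
 * sh64 stack frames are not chained together, so finding the previous frame
 * means disassembling the prologue of the function that 'pc' falls within to
 * work out where the caller's r14 (frame pointer) and r18 (link register)
 * were saved relative to the current frame.
 *
 * This requires frame pointer support and a usable kallsyms table; without
 * both we cannot even locate the prologue, and the whole approach remains
 * best-effort.
 */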
static int lookup_prev_stack_frame(unsigned long fp, unsigned long pc,
				   unsigned long *pprev_fp, unsigned long *pprev_pc,
				   struct pt_regs *regs)
{
	const char *sym;
	char namebuf[128];
	unsigned long offset;
	unsigned long prologue = 0;
	unsigned long fp_displacement = 0;
	unsigned long fp_prev = 0;
	unsigned long offset_r14 = 0, offset_r18 = 0;
	int i, found_prologue_end = 0;

	sym = kallsyms_lookup(pc, NULL, &offset, NULL, namebuf);
	if (!sym)
		return -EINVAL;

	prologue = pc - offset;
	if (!prologue)
		return -EINVAL;

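	/*
	 * Sanity check the frame pointer before dereferencing it: it must
	 * lie within the kernel mapping of the first 128MB of RAM and be
	 * 8-byte aligned.
	 */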
	if ((fp < (unsigned long) phys_to_virt(__MEMORY_START)) ||
	    (fp >= (unsigned long)(phys_to_virt(__MEMORY_START)) + 128*1024*1024) ||
	    ((fp & 7) != 0)) {
		return -EINVAL;
	}

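	/*
	 * Walk the prologue one opcode at a time, up to an arbitrary limit
	 * of 100 instructions.
	 */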
	for (i = 0; i < 100; i++, prologue += sizeof(unsigned long)) {
		unsigned long op;
		u8 major, minor;
		u8 src, dest, disp;

		op = *(unsigned long *)prologue;

		major = (op >> 26) & 0x3f;
		src   = (op >> 20) & 0x3f;
		minor = (op >> 16) & 0xf;
		disp  = (op >> 10) & 0x3f;
		dest  = (op >> 4)  & 0x3f;

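		/*
		 * Stack frames are created in one of two ways. When the frame
		 * size fits in the signed 10-bit immediate, a single
		 * addi/addi.l adjusts the stack pointer:
		 *
		 *	addi/addi.l	r15, -FRAME_SIZE, r15
		 *
		 * Larger frames are built with a movi/sub pair:
		 *
		 *	movi	FRAME_SIZE, rX
		 *	sub	r15, rX, r15
		 */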
		switch (major) {
		case (0x00 >> 2):
			switch (minor) {
			case 0x8:	/* add.l */
			case 0x9:	/* add */
				/* r14 = r15 + r63 sets up the frame pointer: end of prologue */
				if (src == 15 && disp == 63 && dest == 14)
					found_prologue_end = 1;

				break;
			case 0xa:	/* sub.l */
			case 0xb:	/* sub */
				/* Large frames: 'sub r15, rX, r15' using a size loaded by movi */
				if (src != 15 || dest != 15)
					continue;

				fp_displacement -= regcache[disp];
				fp_prev = fp - fp_displacement;
				break;
			}
			break;
		case (0xa8 >> 2):	/* st.l */
			/* Only interested in stores relative to the stack pointer */
			if (src != 15)
				continue;

			switch (dest) {
			case 14:	/* saved frame pointer */
				if (offset_r14 || fp_displacement == 0)
					continue;

				offset_r14 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
				offset_r14 *= sizeof(unsigned long);
				offset_r14 += fp_displacement;
				break;
			case 18:	/* saved link register */
				if (offset_r18 || fp_displacement == 0)
					continue;

				offset_r18 = (u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
				offset_r18 *= sizeof(unsigned long);
				offset_r18 += fp_displacement;
				break;
			}

			break;
		case (0xcc >> 2):	/* movi */
			if (dest >= 63) {
				printk(KERN_NOTICE "%s: Invalid dest reg %d "
				       "specified in movi handler. Failed "
				       "opcode was 0x%lx: ", __func__,
				       dest, op);

				continue;
			}

			/* Cache the sign-extended immediate for this register */
			regcache[dest] =
				sign_extend64((((u64)op >> 10) & 0xffff), 9);
			break;
		case (0xd0 >> 2):	/* addi */
		case (0xd4 >> 2):	/* addi.l */
			/* Look for 'addi/addi.l r15, -FRAME_SIZE, r15' */
			if (src != 15 || dest != 15)
				continue;

			/* Accumulate the sign-extended frame size */
			fp_displacement +=
				(u64)(((((s64)op >> 10) & 0x3ff) << 54) >> 54);
			fp_prev = fp - fp_displacement;
			break;
		}

		if (found_prologue_end && offset_r14 && (offset_r18 || *pprev_pc) && fp_prev)
			break;
	}

	if (offset_r14 == 0 || fp_prev == 0) {
		if (!offset_r14)
			pr_debug("Unable to find r14 offset\n");
		if (!fp_prev)
			pr_debug("Unable to find previous fp\n");

		return -EINVAL;
	}

	/* The innermost leaf function may never have saved r18 to the stack */
	if (!*pprev_pc && (offset_r18 == 0))
		return -EINVAL;

	*pprev_fp = *(unsigned long *)(fp_prev + offset_r14);

	if (offset_r18)
		*pprev_pc = *(unsigned long *)(fp_prev + offset_r18);

	*pprev_pc &= ~1;

	return 0;
}
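
/*
 * Kept static rather than on the stack, since the unwinder may well be
 * invoked when there is little stack space left to play with.
 */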
static struct pt_regs here_regs;

extern const char syscall_ret;
extern const char ret_from_syscall;
extern const char ret_from_exception;
extern const char ret_from_irq;

static void sh64_unwind_inner(const struct stacktrace_ops *ops,
			      void *data, struct pt_regs *regs);

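/*
 * On the syscall/exception/IRQ return paths the frame pointer points at the
 * saved pt_regs of the interrupted context, so validate it and recurse into
 * that context.
 */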
static inline void unwind_nested(const struct stacktrace_ops *ops, void *data,
				 unsigned long pc, unsigned long fp)
{
	if ((fp >= __MEMORY_START) &&
	    ((fp & 7) == 0))
		sh64_unwind_inner(ops, data, (struct pt_regs *)fp);
}

static void sh64_unwind_inner(const struct stacktrace_ops *ops,
			      void *data, struct pt_regs *regs)
{
	unsigned long pc, fp;
	int ofs = 0;
	int first_pass;

	pc = regs->pc & ~1;	/* strip the ISA mode bit from the PC */
	fp = regs->regs[14];

	first_pass = 1;
	for (;;) {
		int cond;
		unsigned long next_fp, next_pc;

		if (pc == ((unsigned long)&syscall_ret & ~1)) {
			printk("SYSCALL\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		if (pc == ((unsigned long)&ret_from_syscall & ~1)) {
			printk("SYSCALL (PREEMPTED)\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		if (pc == ((unsigned long)&ret_from_exception & ~1)) {
			printk("EXCEPTION\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		if (pc == ((unsigned long)&ret_from_irq & ~1)) {
			printk("IRQ\n");
			unwind_nested(ops, data, pc, fp);
			return;
		}

		cond = ((pc >= __MEMORY_START) && (fp >= __MEMORY_START) &&
			((pc & 3) == 0) && ((fp & 7) == 0));

		pc -= ofs;

		ops->address(data, pc, 1);

		if (first_pass) {
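			/*
			 * The innermost frame may be a leaf function that
			 * never saved r18 to the stack, so seed the return
			 * address from the live register instead.
			 */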
			next_pc = regs->regs[18];
		} else {
			next_pc = 0;
		}

		if (lookup_prev_stack_frame(fp, pc, &next_fp, &next_pc, regs) == 0) {
			ofs = sizeof(unsigned long);
			pc = next_pc & ~1;
			fp = next_fp;
		} else {
			printk("Unable to lookup previous stack frame\n");
			break;
		}
		first_pass = 0;
	}

	printk("\n");
}

static void sh64_unwinder_dump(struct task_struct *task,
			       struct pt_regs *regs,
			       unsigned long *sp,
			       const struct stacktrace_ops *ops,
			       void *data)
{
	if (!regs) {
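		/*
		 * No register state was handed to us, so capture the
		 * current context to backtrace from.
		 */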
		regs = &here_regs;

		/* Copy the live general registers we care about */
		__asm__ __volatile__ ("ori r14, 0, %0" : "=r" (regs->regs[14]));
		__asm__ __volatile__ ("ori r15, 0, %0" : "=r" (regs->regs[15]));
		__asm__ __volatile__ ("ori r18, 0, %0" : "=r" (regs->regs[18]));

		/* ... and the branch target registers */
		__asm__ __volatile__ ("gettr tr0, %0" : "=r" (regs->tregs[0]));
		__asm__ __volatile__ ("gettr tr1, %0" : "=r" (regs->tregs[1]));
		__asm__ __volatile__ ("gettr tr2, %0" : "=r" (regs->tregs[2]));
		__asm__ __volatile__ ("gettr tr3, %0" : "=r" (regs->tregs[3]));
		__asm__ __volatile__ ("gettr tr4, %0" : "=r" (regs->tregs[4]));
		__asm__ __volatile__ ("gettr tr5, %0" : "=r" (regs->tregs[5]));
		__asm__ __volatile__ ("gettr tr6, %0" : "=r" (regs->tregs[6]));
		__asm__ __volatile__ ("gettr tr7, %0" : "=r" (regs->tregs[7]));

		/* Recover the current PC by branch-and-linking to the next instruction */
		__asm__ __volatile__ (
			"pta 0f, tr0\n\t"
			"blink tr0, %0\n\t"
			"0: nop"
			: "=r" (regs->pc)
		);
	}

	sh64_unwind_inner(ops, data, regs);
}

static struct unwinder sh64_unwinder = {
	.name = "sh64-unwinder",
	.dump = sh64_unwinder_dump,
	.rating = 150,
};

static int __init sh64_unwinder_init(void)
{
	return unwinder_register(&sh64_unwinder);
}

early_initcall(sh64_unwinder_init);