1
2
3
4
5
6
7
8
9
10
11
12
13
14
15#include <linux/errno.h>
16#include <linux/hw_breakpoint.h>
17#include <linux/kernel.h>
18#include <linux/mm.h>
19#include <linux/perf_event.h>
20#include <linux/ptrace.h>
21#include <linux/regset.h>
22#include <linux/sched.h>
23#include <linux/sched/task_stack.h>
24#include <linux/security.h>
25#include <linux/signal.h>
26#include <linux/smp.h>
27#include <linux/tracehook.h>
28#include <linux/uaccess.h>
29
30#define CREATE_TRACE_POINTS
31#include <trace/events/syscalls.h>
32
33#include <asm/coprocessor.h>
34#include <asm/elf.h>
35#include <asm/page.h>
36#include <asm/ptrace.h>
37
/*
 * regset get() handler for the general registers (NT_PRSTATUS).
 *
 * Builds a struct user_pt_regs snapshot from the target's saved
 * pt_regs and copies it out through the regset core.  The address
 * registers are un-rotated so that userspace sees the physical AR
 * file starting at a0, independent of the current windowbase.
 */
static int gpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	struct user_pt_regs newregs = {
		.pc = regs->pc,
		/* Never leak the exception-mode bit to userspace. */
		.ps = regs->ps & ~(1 << PS_EXCM_BIT),
		.lbeg = regs->lbeg,
		.lend = regs->lend,
		.lcount = regs->lcount,
		.sar = regs->sar,
		.threadptr = regs->threadptr,
		.windowbase = regs->windowbase,
		.windowstart = regs->windowstart,
	};

	/*
	 * regs->areg[] holds the register file rotated by windowbase;
	 * copy it out in two pieces so newregs.a[] ends up in physical
	 * order.  windowbase counts 4-register windows, hence the * 4
	 * (registers) and * 16 (bytes, i.e. 4 regs * 4 bytes) factors.
	 */
	memcpy(newregs.a,
	       regs->areg + XCHAL_NUM_AREGS - regs->windowbase * 4,
	       regs->windowbase * 16);
	memcpy(newregs.a + regs->windowbase * 4,
	       regs->areg,
	       (WSBITS - regs->windowbase) * 16);

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &newregs, 0, -1);
}
66
/*
 * regset set() handler for the general registers (NT_PRSTATUS).
 *
 * Copies a struct user_pt_regs from the tracer into the target's
 * saved pt_regs.  Only the call-increment and old-windowbase fields
 * of PS may be written; windowbase is range-checked before use since
 * it scales the areg copy below.
 */
static int gpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;
	struct user_pt_regs newregs = {0};
	struct pt_regs *regs;
	/* PS bits userspace is allowed to modify. */
	const u32 ps_mask = PS_CALLINC_MASK | PS_OWB_MASK;

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &newregs, 0, -1);
	if (ret)
		return ret;

	/* Reject out-of-range windowbase: it indexes 4-register windows. */
	if (newregs.windowbase >= XCHAL_NUM_AREGS / 4)
		return -EINVAL;

	regs = task_pt_regs(target);
	regs->pc = newregs.pc;
	regs->ps = (regs->ps & ~ps_mask) | (newregs.ps & ps_mask);
	regs->lbeg = newregs.lbeg;
	regs->lend = newregs.lend;
	regs->lcount = newregs.lcount;
	regs->sar = newregs.sar;
	regs->threadptr = newregs.threadptr;

	if (newregs.windowbase != regs->windowbase ||
	    newregs.windowstart != regs->windowstart) {
		u32 rotws, wmask;

		/*
		 * Recompute the window mask from the new windowstart:
		 * rotate windowstart by windowbase (the |..<< WSBITS
		 * double-word trick makes the shift circular), keep
		 * WSBITS bits, and drop bit 0 (the current frame).
		 * wmask then encodes, per the kernel's frame layout,
		 * how many register windows need to be spilled.
		 */
		rotws = (((newregs.windowstart |
			   (newregs.windowstart << WSBITS)) >>
			  newregs.windowbase) &
			 ((1 << WSBITS) - 1)) & ~1;
		wmask = ((rotws ? WSBITS + 1 - ffs(rotws) : 0) << 4) |
			(rotws & 0xF) | 1;
		regs->windowbase = newregs.windowbase;
		regs->windowstart = newregs.windowstart;
		regs->wmask = wmask;
	}

	/* Re-rotate the physical AR file into the kernel's layout. */
	memcpy(regs->areg + XCHAL_NUM_AREGS - newregs.windowbase * 4,
	       newregs.a, newregs.windowbase * 16);
	memcpy(regs->areg, newregs.a + newregs.windowbase * 4,
	       (WSBITS - newregs.windowbase) * 16);

	return 0;
}
115
116static int tie_get(struct task_struct *target,
117 const struct user_regset *regset,
118 unsigned int pos, unsigned int count,
119 void *kbuf, void __user *ubuf)
120{
121 int ret;
122 struct pt_regs *regs = task_pt_regs(target);
123 struct thread_info *ti = task_thread_info(target);
124 elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);
125
126 if (!newregs)
127 return -ENOMEM;
128
129 newregs->opt = regs->xtregs_opt;
130 newregs->user = ti->xtregs_user;
131
132#if XTENSA_HAVE_COPROCESSORS
133
134 coprocessor_flush_all(ti);
135 newregs->cp0 = ti->xtregs_cp.cp0;
136 newregs->cp1 = ti->xtregs_cp.cp1;
137 newregs->cp2 = ti->xtregs_cp.cp2;
138 newregs->cp3 = ti->xtregs_cp.cp3;
139 newregs->cp4 = ti->xtregs_cp.cp4;
140 newregs->cp5 = ti->xtregs_cp.cp5;
141 newregs->cp6 = ti->xtregs_cp.cp6;
142 newregs->cp7 = ti->xtregs_cp.cp7;
143#endif
144 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
145 newregs, 0, -1);
146 kfree(newregs);
147 return ret;
148}
149
150static int tie_set(struct task_struct *target,
151 const struct user_regset *regset,
152 unsigned int pos, unsigned int count,
153 const void *kbuf, const void __user *ubuf)
154{
155 int ret;
156 struct pt_regs *regs = task_pt_regs(target);
157 struct thread_info *ti = task_thread_info(target);
158 elf_xtregs_t *newregs = kzalloc(sizeof(elf_xtregs_t), GFP_KERNEL);
159
160 if (!newregs)
161 return -ENOMEM;
162
163 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
164 newregs, 0, -1);
165
166 if (ret)
167 goto exit;
168 regs->xtregs_opt = newregs->opt;
169 ti->xtregs_user = newregs->user;
170
171#if XTENSA_HAVE_COPROCESSORS
172
173 coprocessor_flush_all(ti);
174 coprocessor_release_all(ti);
175 ti->xtregs_cp.cp0 = newregs->cp0;
176 ti->xtregs_cp.cp1 = newregs->cp1;
177 ti->xtregs_cp.cp2 = newregs->cp2;
178 ti->xtregs_cp.cp3 = newregs->cp3;
179 ti->xtregs_cp.cp4 = newregs->cp4;
180 ti->xtregs_cp.cp5 = newregs->cp5;
181 ti->xtregs_cp.cp6 = newregs->cp6;
182 ti->xtregs_cp.cp7 = newregs->cp7;
183#endif
184exit:
185 kfree(newregs);
186 return ret;
187}
188
/* Indices into xtensa_regsets[]; also the regset numbers used by
 * the PTRACE_GET/SETREGS and PTRACE_GET/SETXTREGS wrappers below. */
enum xtensa_regset {
	REGSET_GPR,
	REGSET_TIE,
};
193
/*
 * The xtensa user_regset table: general registers are exported as
 * NT_PRSTATUS, TIE/coprocessor state as NT_PRFPREG, both as arrays
 * of 32-bit words.
 */
static const struct user_regset xtensa_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS,
		.n = sizeof(struct user_pt_regs) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = gpr_get,
		.set = gpr_set,
	},
	[REGSET_TIE] = {
		.core_note_type = NT_PRFPREG,
		.n = sizeof(elf_xtregs_t) / sizeof(u32),
		.size = sizeof(u32),
		.align = sizeof(u32),
		.get = tie_get,
		.set = tie_set,
	},
};
212
/* The single regset view exposed for all xtensa tasks. */
static const struct user_regset_view user_xtensa_view = {
	.name = "xtensa",
	.e_machine = EM_XTENSA,
	.regsets = xtensa_regsets,
	.n = ARRAY_SIZE(xtensa_regsets)
};
219
/* There is only one ABI on xtensa, so every task gets the same view. */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
	return &user_xtensa_view;
}
224
/* Flag the child for single-stepping; the actual stepping is done by
 * the signal/exception return path checking PT_SINGLESTEP. */
void user_enable_single_step(struct task_struct *child)
{
	child->ptrace |= PT_SINGLESTEP;
}
229
/* Clear the single-step request set by user_enable_single_step(). */
void user_disable_single_step(struct task_struct *child)
{
	child->ptrace &= ~PT_SINGLESTEP;
}
234
235
236
237
238
/*
 * Called by the generic ptrace code when detaching; nothing to do
 * here since xtensa keeps no per-task debug state outside of what
 * the generic code already tears down.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Nothing to do.. */
}
243
244static int ptrace_getregs(struct task_struct *child, void __user *uregs)
245{
246 return copy_regset_to_user(child, &user_xtensa_view, REGSET_GPR,
247 0, sizeof(xtensa_gregset_t), uregs);
248}
249
250static int ptrace_setregs(struct task_struct *child, void __user *uregs)
251{
252 return copy_regset_from_user(child, &user_xtensa_view, REGSET_GPR,
253 0, sizeof(xtensa_gregset_t), uregs);
254}
255
256static int ptrace_getxregs(struct task_struct *child, void __user *uregs)
257{
258 return copy_regset_to_user(child, &user_xtensa_view, REGSET_TIE,
259 0, sizeof(elf_xtregs_t), uregs);
260}
261
262static int ptrace_setxregs(struct task_struct *child, void __user *uregs)
263{
264 return copy_regset_from_user(child, &user_xtensa_view, REGSET_TIE,
265 0, sizeof(elf_xtregs_t), uregs);
266}
267
/*
 * PTRACE_PEEKUSR: read one register of the traced child, identified
 * by the regno constants from asm/ptrace.h, and store it at *ret.
 * Returns -EIO for an unknown regno, otherwise the put_user() result.
 */
static int ptrace_peekusr(struct task_struct *child, long regno,
			  long __user *ret)
{
	struct pt_regs *regs;
	unsigned long tmp;

	regs = task_pt_regs(child);
	tmp = 0;

	switch(regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		/* Physical address registers, raw (kernel rotation). */
		tmp = regs->areg[regno - REG_AR_BASE];
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		/*
		 * Current-window a0..a15.  NOTE(review): this indexes
		 * areg[] directly without applying windowbase, which
		 * presumably relies on the frame layout at the ptrace
		 * stop — confirm against the exception entry code.
		 */
		tmp = regs->areg[regno - REG_A_BASE];
		break;

	case REG_PC:
		tmp = regs->pc;
		break;

	case REG_PS:
		/*
		 * Mask out the exception-mode bit: it is an internal
		 * detail of the kernel exception frame and must not
		 * be visible to (or restorable by) the tracer.
		 */
		tmp = (regs->ps & ~(1 << PS_EXCM_BIT));
		break;

	case REG_WB:
		break;		/* windowbase is reported as 0 */

	case REG_WS:
	{
		/* Report windowstart rotated so it matches WB == 0. */
		unsigned long wb = regs->windowbase;
		unsigned long ws = regs->windowstart;
		tmp = ((ws >> wb) | (ws << (WSBITS - wb))) &
			((1 << WSBITS) - 1);
		break;
	}
	case REG_LBEG:
		tmp = regs->lbeg;
		break;

	case REG_LEND:
		tmp = regs->lend;
		break;

	case REG_LCOUNT:
		tmp = regs->lcount;
		break;

	case REG_SAR:
		tmp = regs->sar;
		break;

	case SYSCALL_NR:
		tmp = regs->syscall;
		break;

	default:
		return -EIO;
	}
	return put_user(tmp, ret);
}
334
/*
 * PTRACE_POKEUSR: write one register of the traced child.  Only the
 * address registers, pc and the syscall number are writable through
 * this interface; PS, the window registers and the loop registers
 * are not (use PTRACE_SETREGS/gpr_set for those).  Returns -EIO for
 * an unsupported regno.
 */
static int ptrace_pokeusr(struct task_struct *child, long regno, long val)
{
	struct pt_regs *regs;
	regs = task_pt_regs(child);

	switch (regno) {
	case REG_AR_BASE ... REG_AR_BASE + XCHAL_NUM_AREGS - 1:
		regs->areg[regno - REG_AR_BASE] = val;
		break;

	case REG_A_BASE ... REG_A_BASE + 15:
		/* NOTE(review): like the peek path, this does not
		 * apply windowbase — confirm intended. */
		regs->areg[regno - REG_A_BASE] = val;
		break;

	case REG_PC:
		regs->pc = val;
		break;

	case SYSCALL_NR:
		regs->syscall = val;
		break;

	default:
		return -EIO;
	}
	return 0;
}
362
363#ifdef CONFIG_HAVE_HW_BREAKPOINT
/*
 * perf callback invoked when a ptrace-installed hardware breakpoint
 * or watchpoint fires.  Encodes the slot that triggered into the
 * errno-style value delivered with the SIGTRAP: bit 0 distinguishes
 * instruction breakpoints (0) from data watchpoints (1), the upper
 * bits carry the slot index — the same encoding userspace uses as
 * the addr argument of PTRACE_GET/SETHBPREGS.
 */
static void ptrace_hbptriggered(struct perf_event *bp,
				struct perf_sample_data *data,
				struct pt_regs *regs)
{
	int i;
	struct arch_hw_breakpoint *bkpt = counter_arch_bp(bp);

	if (bp->attr.bp_type & HW_BREAKPOINT_X) {
		/* Find which ibreak slot this event occupies. */
		for (i = 0; i < XCHAL_NUM_IBREAK; ++i)
			if (current->thread.ptrace_bp[i] == bp)
				break;
		i <<= 1;
	} else {
		/* Find which dbreak slot this event occupies. */
		for (i = 0; i < XCHAL_NUM_DBREAK; ++i)
			if (current->thread.ptrace_wp[i] == bp)
				break;
		i = (i << 1) | 1;
	}

	force_sig_ptrace_errno_trap(i, (void __user *)bkpt->address);
}
385
386static struct perf_event *ptrace_hbp_create(struct task_struct *tsk, int type)
387{
388 struct perf_event_attr attr;
389
390 ptrace_breakpoint_init(&attr);
391
392
393 attr.bp_addr = 0;
394 attr.bp_len = 1;
395 attr.bp_type = type;
396 attr.disabled = 1;
397
398 return register_user_hw_breakpoint(&attr, ptrace_hbptriggered, NULL,
399 tsk);
400}
401
402
403
404
405
406
407
408
409
410
411
412
413
414static long ptrace_gethbpregs(struct task_struct *child, long addr,
415 long __user *datap)
416{
417 struct perf_event *bp;
418 u32 user_data[2] = {0};
419 bool dbreak = addr & 1;
420 unsigned idx = addr >> 1;
421
422 if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
423 (dbreak && idx >= XCHAL_NUM_DBREAK))
424 return -EINVAL;
425
426 if (dbreak)
427 bp = child->thread.ptrace_wp[idx];
428 else
429 bp = child->thread.ptrace_bp[idx];
430
431 if (bp) {
432 user_data[0] = bp->attr.bp_addr;
433 user_data[1] = bp->attr.disabled ? 0 : bp->attr.bp_len;
434 if (dbreak) {
435 if (bp->attr.bp_type & HW_BREAKPOINT_R)
436 user_data[1] |= DBREAKC_LOAD_MASK;
437 if (bp->attr.bp_type & HW_BREAKPOINT_W)
438 user_data[1] |= DBREAKC_STOR_MASK;
439 }
440 }
441
442 if (copy_to_user(datap, user_data, sizeof(user_data)))
443 return -EFAULT;
444
445 return 0;
446}
447
/*
 * PTRACE_SETHBPREGS: program one hardware breakpoint slot.  addr uses
 * the same encoding as PTRACE_GETHBPREGS (bit 0: dbreak vs ibreak,
 * rest: slot index).  Userspace supplies two u32 words: the address
 * and the length, with DBREAKC load/store flags or'ed into the length
 * for watchpoints.  A zero length disables the slot.  The perf event
 * backing the slot is created lazily on first use.
 */
static long ptrace_sethbpregs(struct task_struct *child, long addr,
			      long __user *datap)
{
	struct perf_event *bp;
	struct perf_event_attr attr;
	u32 user_data[2];
	bool dbreak = addr & 1;
	unsigned idx = addr >> 1;
	int bp_type = 0;

	if ((!dbreak && idx >= XCHAL_NUM_IBREAK) ||
	    (dbreak && idx >= XCHAL_NUM_DBREAK))
		return -EINVAL;

	if (copy_from_user(user_data, datap, sizeof(user_data)))
		return -EFAULT;

	if (dbreak) {
		bp = child->thread.ptrace_wp[idx];
		/* Translate DBREAKC flags to perf breakpoint types. */
		if (user_data[1] & DBREAKC_LOAD_MASK)
			bp_type |= HW_BREAKPOINT_R;
		if (user_data[1] & DBREAKC_STOR_MASK)
			bp_type |= HW_BREAKPOINT_W;
	} else {
		bp = child->thread.ptrace_bp[idx];
		bp_type = HW_BREAKPOINT_X;
	}

	if (!bp) {
		/*
		 * First use of this slot: create the backing event.
		 * Fall back to RW for a watchpoint request with no
		 * load/store flags set (it stays disabled below).
		 */
		bp = ptrace_hbp_create(child,
				       bp_type ? bp_type : HW_BREAKPOINT_RW);
		if (IS_ERR(bp))
			return PTR_ERR(bp);
		if (dbreak)
			child->thread.ptrace_wp[idx] = bp;
		else
			child->thread.ptrace_bp[idx] = bp;
	}

	attr = bp->attr;
	attr.bp_addr = user_data[0];
	/* Strip the DBREAKC flag bits to recover the plain length. */
	attr.bp_len = user_data[1] & ~(DBREAKC_LOAD_MASK | DBREAKC_STOR_MASK);
	attr.bp_type = bp_type;
	/* Zero length means "disable this slot". */
	attr.disabled = !attr.bp_len;

	return modify_user_hw_breakpoint(bp, &attr);
}
495#endif
496
497long arch_ptrace(struct task_struct *child, long request,
498 unsigned long addr, unsigned long data)
499{
500 int ret = -EPERM;
501 void __user *datap = (void __user *) data;
502
503 switch (request) {
504 case PTRACE_PEEKUSR:
505 ret = ptrace_peekusr(child, addr, datap);
506 break;
507
508 case PTRACE_POKEUSR:
509 ret = ptrace_pokeusr(child, addr, data);
510 break;
511
512 case PTRACE_GETREGS:
513 ret = ptrace_getregs(child, datap);
514 break;
515
516 case PTRACE_SETREGS:
517 ret = ptrace_setregs(child, datap);
518 break;
519
520 case PTRACE_GETXTREGS:
521 ret = ptrace_getxregs(child, datap);
522 break;
523
524 case PTRACE_SETXTREGS:
525 ret = ptrace_setxregs(child, datap);
526 break;
527#ifdef CONFIG_HAVE_HW_BREAKPOINT
528 case PTRACE_GETHBPREGS:
529 ret = ptrace_gethbpregs(child, addr, datap);
530 break;
531
532 case PTRACE_SETHBPREGS:
533 ret = ptrace_sethbpregs(child, addr, datap);
534 break;
535#endif
536 default:
537 ret = ptrace_request(child, request, addr, data);
538 break;
539 }
540
541 return ret;
542}
543
/* Forward declaration: defined below, but called from the enter path
 * when the syscall is skipped so the exit hooks still run. */
void do_syscall_trace_leave(struct pt_regs *regs);

/*
 * Syscall entry tracing hook.  Returns 1 if the syscall should
 * proceed, 0 if it must be skipped (either the tracer cancelled it
 * or no syscall was pending).  In both skip cases areg[2] — the
 * syscall return-value register — is preloaded with -ENOSYS.
 */
int do_syscall_trace_enter(struct pt_regs *regs)
{
	if (regs->syscall == NO_SYSCALL)
		regs->areg[2] = -ENOSYS;

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs)) {
		/* Tracer asked to abort the syscall. */
		regs->areg[2] = -ENOSYS;
		regs->syscall = NO_SYSCALL;
		return 0;
	}

	if (regs->syscall == NO_SYSCALL) {
		/* Nothing to run: fire the exit hooks ourselves so
		 * the tracer still sees a matched entry/exit pair. */
		do_syscall_trace_leave(regs);
		return 0;
	}

	if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
		trace_sys_enter(regs, syscall_get_nr(current, regs));

	return 1;
}
567
568void do_syscall_trace_leave(struct pt_regs *regs)
569{
570 int step;
571
572 if (test_thread_flag(TIF_SYSCALL_TRACEPOINT))
573 trace_sys_exit(regs, regs_return_value(regs));
574
575 step = test_thread_flag(TIF_SINGLESTEP);
576
577 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
578 tracehook_report_syscall_exit(regs, step);
579}
580