#include <linux/compiler.h>
#include <linux/context_tracking.h>
#include <linux/elf.h>
#include <linux/kernel.h>
#include <linux/sched.h>
#include <linux/sched/task_stack.h>
#include <linux/mm.h>
#include <linux/errno.h>
#include <linux/ptrace.h>
#include <linux/regset.h>
#include <linux/smp.h>
#include <linux/security.h>
#include <linux/stddef.h>
#include <linux/audit.h>
#include <linux/seccomp.h>
#include <linux/ftrace.h>

#include <asm/byteorder.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/mipsregs.h>
#include <asm/mipsmtregs.h>
#include <asm/page.h>
#include <asm/processor.h>
#include <asm/syscall.h>
#include <linux/uaccess.h>
#include <asm/bootinfo.h>
#include <asm/reg.h>

#define CREATE_TRACE_POINTS
#include <trace/events/syscalls.h>

/*
 * Called by kernel/ptrace.c when detaching: make sure the watchpoint
 * state is not carried over to the detached task.
 */
void ptrace_disable(struct task_struct *child)
{
	/* Don't load the watchpoint registers for the ex-child. */
	clear_tsk_thread_flag(child, TIF_LOAD_WATCH);
}

/*
 * Read a general register set.  We always use the 64-bit format, even
 * for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
 * Registers are sign-extended to fill the available space.
 */
int ptrace_getregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__put_user((long)regs->regs[i], (__s64 __user *)&data->regs[i]);
	__put_user((long)regs->lo, (__s64 __user *)&data->lo);
	__put_user((long)regs->hi, (__s64 __user *)&data->hi);
	__put_user((long)regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	__put_user((long)regs->cp0_badvaddr, (__s64 __user *)&data->cp0_badvaddr);
	__put_user((long)regs->cp0_status, (__s64 __user *)&data->cp0_status);
	__put_user((long)regs->cp0_cause, (__s64 __user *)&data->cp0_cause);

	return 0;
}

/*
 * Write a general register set.  As for PTRACE_GETREGS, we always use
 * the 64-bit format; on a 32-bit kernel only the low-order bits of each
 * value are used.
 */
int ptrace_setregs(struct task_struct *child, struct user_pt_regs __user *data)
{
	struct pt_regs *regs;
	int i;

	if (!access_ok(data, 38 * 8))
		return -EIO;

	regs = task_pt_regs(child);

	for (i = 0; i < 32; i++)
		__get_user(regs->regs[i], (__s64 __user *)&data->regs[i]);
	__get_user(regs->lo, (__s64 __user *)&data->lo);
	__get_user(regs->hi, (__s64 __user *)&data->hi);
	__get_user(regs->cp0_epc, (__s64 __user *)&data->cp0_epc);
	/* badvaddr, status, and cause may not be written. */

	/* System call number may have been changed */
	mips_syscall_update_nr(child, regs);

	return 0;
}

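/*
 * Read the watch register state for PTRACE_GET_WATCH_REGS: report the
 * watch register style, the number of implemented watch register pairs
 * and their current lo/hi/mask values; remaining slots (up to 8) read
 * back as zero.
 */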
int ptrace_get_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	enum pt_watch_style style;
	int i;

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

#ifdef CONFIG_32BIT
	style = pt_watch_style_mips32;
#define WATCH_STYLE mips32
#else
	style = pt_watch_style_mips64;
#define WATCH_STYLE mips64
#endif

	__put_user(style, &addr->style);
	__put_user(boot_cpu_data.watch_reg_use_cnt,
		   &addr->WATCH_STYLE.num_valid);
	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__put_user(child->thread.watch.mips3264.watchlo[i],
			   &addr->WATCH_STYLE.watchlo[i]);
		__put_user(child->thread.watch.mips3264.watchhi[i] &
			   (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
			   &addr->WATCH_STYLE.watchhi[i]);
		__put_user(boot_cpu_data.watch_reg_masks[i],
			   &addr->WATCH_STYLE.watch_masks[i]);
	}
	for (; i < 8; i++) {
		__put_user(0, &addr->WATCH_STYLE.watchlo[i]);
		__put_user(0, &addr->WATCH_STYLE.watchhi[i]);
		__put_user(0, &addr->WATCH_STYLE.watch_masks[i]);
	}

	return 0;
}

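/*
 * Write the watch register state for PTRACE_SET_WATCH_REGS: validate
 * the requested addresses and watchhi bits, update the saved watch
 * registers and set or clear TIF_LOAD_WATCH depending on whether any
 * I/R/W bit is armed.
 */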
int ptrace_set_watch_regs(struct task_struct *child,
			  struct pt_watch_regs __user *addr)
{
	int i;
	int watch_active = 0;
	unsigned long lt[NUM_WATCH_REGS];
	u16 ht[NUM_WATCH_REGS];

	if (!cpu_has_watch || boot_cpu_data.watch_reg_use_cnt == 0)
		return -EIO;
	if (!access_ok(addr, sizeof(struct pt_watch_regs)))
		return -EIO;

	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		__get_user(lt[i], &addr->WATCH_STYLE.watchlo[i]);
#ifdef CONFIG_32BIT
		if (lt[i] & __UA_LIMIT)
			return -EINVAL;
#else
		if (test_tsk_thread_flag(child, TIF_32BIT_ADDR)) {
			if (lt[i] & 0xffffffff80000000UL)
				return -EINVAL;
		} else {
			if (lt[i] & __UA_LIMIT)
				return -EINVAL;
		}
#endif
		__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
		if (ht[i] & ~MIPS_WATCHHI_MASK)
			return -EINVAL;
	}

	for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
		if (lt[i] & MIPS_WATCHLO_IRW)
			watch_active = 1;
		child->thread.watch.mips3264.watchlo[i] = lt[i];

		child->thread.watch.mips3264.watchhi[i] = ht[i];
	}

	if (watch_active)
		set_tsk_thread_flag(child, TIF_LOAD_WATCH);
	else
		clear_tsk_thread_flag(child, TIF_LOAD_WATCH);

	return 0;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG] = {};

	mips_dump_regs32(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u32 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		/*
		 * Cast all values to signed here so that if this is a 64-bit
		 * kernel, the supplied 32-bit values will be sign extended.
		 */
		switch (i) {
		case MIPS32_EF_R1 ... MIPS32_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS32_EF_R28 ... MIPS32_EF_R31:
			regs->regs[i - MIPS32_EF_R0] = (s32)uregs[i];
			break;
		case MIPS32_EF_LO:
			regs->lo = (s32)uregs[i];
			break;
		case MIPS32_EF_HI:
			regs->hi = (s32)uregs[i];
			break;
		case MIPS32_EF_CP0_EPC:
			regs->cp0_epc = (s32)uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif

#ifdef CONFIG_64BIT

static int gpr64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG] = {};

	mips_dump_regs64(uregs, regs);
	return membuf_write(&to, uregs, sizeof(uregs));
}

static int gpr64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	struct pt_regs *regs = task_pt_regs(target);
	u64 uregs[ELF_NGREG];
	unsigned start, num_regs, i;
	int err;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > ELF_NGREG)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, uregs, 0,
				 sizeof(uregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++) {
		switch (i) {
		case MIPS64_EF_R1 ... MIPS64_EF_R25:
			/* k0/k1 are ignored. */
		case MIPS64_EF_R28 ... MIPS64_EF_R31:
			regs->regs[i - MIPS64_EF_R0] = uregs[i];
			break;
		case MIPS64_EF_LO:
			regs->lo = uregs[i];
			break;
		case MIPS64_EF_HI:
			regs->hi = uregs[i];
			break;
		case MIPS64_EF_CP0_EPC:
			regs->cp0_epc = uregs[i];
			break;
		}
	}

	/* System call number may have been changed */
	mips_syscall_update_nr(target, regs);

	return 0;
}

#endif

#ifdef CONFIG_MIPS_FP_SUPPORT

/*
 * Update the thread's FCSR.  Bits covered by the CPU's FCSR mask
 * (fpu_msk31) are preserved from the saved context; the remaining bits
 * are taken from the supplied value.
 */
static void ptrace_setfcr31(struct task_struct *child, u32 value)
{
	u32 fcr31;
	u32 mask;

	fcr31 = child->thread.fpu.fcr31;
	mask = boot_cpu_data.fpu_msk31;
	child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
}

int ptrace_getfpregs(struct task_struct *child, __u32 __user *data)
{
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	if (tsk_used_math(child)) {
		union fpureg *fregs = get_fpu_regs(child);
		for (i = 0; i < 32; i++)
			__put_user(get_fpr64(&fregs[i], 0),
				   i + (__u64 __user *)data);
	} else {
		for (i = 0; i < 32; i++)
			__put_user((__u64) -1, i + (__u64 __user *) data);
	}

	__put_user(child->thread.fpu.fcr31, data + 64);
	__put_user(boot_cpu_data.fpu_id, data + 65);

	return 0;
}

int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
	union fpureg *fregs;
	u64 fpr_val;
	u32 value;
	int i;

	if (!access_ok(data, 33 * 8))
		return -EIO;

	init_fp_ctx(child);
	fregs = get_fpu_regs(child);

	for (i = 0; i < 32; i++) {
		__get_user(fpr_val, i + (__u64 __user *)data);
		set_fpr64(&fregs[i], 0, fpr_val);
	}

	__get_user(value, data + 64);
	ptrace_setfcr31(child, value);

	/* FIR may not be written. */

	return 0;
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * !CONFIG_CPU_HAS_MSA variant.  The FP context is copied directly, as
 * it is laid out as an array of 64-bit register slots.
 */
static void fpr_get_fpa(struct task_struct *target,
			struct membuf *to)
{
	membuf_write(to, &target->thread.fpu,
		     NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer,
 * CONFIG_CPU_HAS_MSA variant.  Only the lower 64 bits of each vector
 * register are copied.
 */
static void fpr_get_msa(struct task_struct *target, struct membuf *to)
{
	unsigned int i;

	BUILD_BUG_ON(sizeof(u64) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS; i++)
		membuf_store(to, get_fpr64(&target->thread.fpu.fpr[i], 0));
}

/*
 * Copy the floating-point context to the supplied NT_PRFPREG buffer.
 * Choose the appropriate helper for the general registers, and then
 * copy the FCSR and FIR registers separately.
 */
static int fpr_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		fpr_get_fpa(target, &to);
	else
		fpr_get_msa(target, &to);

	membuf_write(&to, &target->thread.fpu.fcr31, sizeof(u32));
	membuf_write(&to, &boot_cpu_data.fpu_id, sizeof(u32));
	return 0;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * !CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to the FP
 * context directly.
 */
static int fpr_set_fpa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	return user_regset_copyin(pos, count, kbuf, ubuf,
				  &target->thread.fpu,
				  0, NUM_FPU_REGS * sizeof(elf_fpreg_t));
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context,
 * CONFIG_CPU_HAS_MSA variant.  Buffer slots are copied to the lower
 * 64 bits of the registers only.
 */
static int fpr_set_msa(struct task_struct *target,
		       unsigned int *pos, unsigned int *count,
		       const void **kbuf, const void __user **ubuf)
{
	unsigned int i;
	u64 fpr_val;
	int err;

	BUILD_BUG_ON(sizeof(fpr_val) != sizeof(elf_fpreg_t));
	for (i = 0; i < NUM_FPU_REGS && *count > 0; i++) {
		err = user_regset_copyin(pos, count, kbuf, ubuf,
					 &fpr_val, i * sizeof(elf_fpreg_t),
					 (i + 1) * sizeof(elf_fpreg_t));
		if (err)
			return err;
		set_fpr64(&target->thread.fpu.fpr[i], 0, fpr_val);
	}

	return 0;
}

/*
 * Copy the supplied NT_PRFPREG buffer to the floating-point context.
 * Choose the appropriate helper for the general registers, then copy
 * the FCSR register separately; the FIR register that follows it is
 * read-only and is ignored.  The buffer must be a whole number of
 * register slots, which the BUG_ON() below enforces.
 */
static int fpr_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const int fcr31_pos = NUM_FPU_REGS * sizeof(elf_fpreg_t);
	const int fir_pos = fcr31_pos + sizeof(u32);
	u32 fcr31;
	int err;

	BUG_ON(count % sizeof(elf_fpreg_t));

	if (pos + count > sizeof(elf_fpregset_t))
		return -EIO;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == sizeof(elf_fpreg_t))
		err = fpr_set_fpa(target, &pos, &count, &kbuf, &ubuf);
	else
		err = fpr_set_msa(target, &pos, &count, &kbuf, &ubuf);
	if (err)
		return err;

	if (count > 0) {
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &fcr31,
					 fcr31_pos, fcr31_pos + sizeof(u32));
		if (err)
			return err;

		ptrace_setfcr31(target, fcr31);
	}

	if (count > 0)
		err = user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
						fir_pos,
						fir_pos + sizeof(u32));

	return err;
}

/* Copy the FP mode setting to the supplied NT_MIPS_FP_MODE buffer. */
static int fp_mode_get(struct task_struct *target,
		       const struct user_regset *regset,
		       struct membuf to)
{
	return membuf_store(&to, (int)mips_get_process_fp_mode(target));
}

/*
 * Update the thread's FP mode according to the supplied NT_MIPS_FP_MODE
 * buffer, which holds a single int giving the new mode.
 */
static int fp_mode_set(struct task_struct *target,
		       const struct user_regset *regset,
		       unsigned int pos, unsigned int count,
		       const void *kbuf, const void __user *ubuf)
{
	int fp_mode;
	int err;

	BUG_ON(count % sizeof(int));

	if (pos + count > sizeof(fp_mode))
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &fp_mode, 0,
				 sizeof(fp_mode));
	if (err)
		return err;

	if (count > 0)
		err = mips_set_process_fp_mode(target, fp_mode);

	return err;
}

#endif /* CONFIG_MIPS_FP_SUPPORT */

#ifdef CONFIG_CPU_HAS_MSA

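/*
 * Control register block appended to the NT_MIPS_MSA regset payload,
 * following the NUM_FPU_REGS vector register slots: the FP and MSA
 * implementation and control/status registers.
 */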
struct msa_control_regs {
	unsigned int fir;
	unsigned int fcsr;
	unsigned int msair;
	unsigned int msacsr;
};

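/*
 * Write NUM_FPU_REGS slots of regset->size bytes each to the output
 * buffer: the live portion (live_sz bytes, capped at the slot size) of
 * each saved FP register is copied and the rest of the slot is filled
 * with all-ones.
 */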
static void copy_pad_fprs(struct task_struct *target,
			  const struct user_regset *regset,
			  struct membuf *to,
			  unsigned int live_sz)
{
	int i, j;
	unsigned long long fill = ~0ull;
	unsigned int cp_sz, pad_sz;

	cp_sz = min(regset->size, live_sz);
	pad_sz = regset->size - cp_sz;
	WARN_ON(pad_sz % sizeof(fill));

	for (i = 0; i < NUM_FPU_REGS; i++) {
		membuf_write(to, &target->thread.fpu.fpr[i], cp_sz);
		for (j = 0; j < (pad_sz / sizeof(fill)); j++)
			membuf_store(to, fill);
	}
}

static int msa_get(struct task_struct *target,
		   const struct user_regset *regset,
		   struct membuf to)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	const struct msa_control_regs ctrl_regs = {
		.fir = boot_cpu_data.fpu_id,
		.fcsr = target->thread.fpu.fcr31,
		.msair = boot_cpu_data.msa_id,
		.msacsr = target->thread.fpu.msacsr,
	};

	if (!tsk_used_math(target)) {
		/* The task hasn't used FP or MSA, fill with 0xff */
		copy_pad_fprs(target, regset, &to, 0);
	} else if (!test_tsk_thread_flag(target, TIF_MSA_CTX_LIVE)) {
		/* Copy scalar FP context, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to, 8);
	} else if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		membuf_write(&to, &target->thread.fpu.fpr, wr_size);
	} else {
		/* Copy as much context as possible, fill the rest with 0xff */
		copy_pad_fprs(target, regset, &to,
			      sizeof(target->thread.fpu.fpr[0]));
	}

	return membuf_write(&to, &ctrl_regs, sizeof(ctrl_regs));
}

static int msa_set(struct task_struct *target,
		   const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	const unsigned int wr_size = NUM_FPU_REGS * regset->size;
	struct msa_control_regs ctrl_regs;
	unsigned int cp_sz;
	int i, err, start;

	init_fp_ctx(target);

	if (sizeof(target->thread.fpu.fpr[0]) == regset->size) {
		/* Trivially copy the vector registers */
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.fpu.fpr,
					 0, wr_size);
	} else {
		/* Copy as much context as possible */
		cp_sz = min_t(unsigned int, regset->size,
			      sizeof(target->thread.fpu.fpr[0]));

		i = start = err = 0;
		for (; i < NUM_FPU_REGS; i++, start += regset->size) {
			err |= user_regset_copyin(&pos, &count, &kbuf, &ubuf,
						  &target->thread.fpu.fpr[i],
						  start, start + cp_sz);
		}
	}

	if (!err)
		err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &ctrl_regs,
					 wr_size, wr_size + sizeof(ctrl_regs));
	if (!err) {
		target->thread.fpu.fcr31 = ctrl_regs.fcsr & ~FPU_CSR_ALL_X;
		target->thread.fpu.msacsr = ctrl_regs.msacsr & ~MSA_CSR_CAUSEF;
	}

	return err;
}

#endif /* CONFIG_CPU_HAS_MSA */

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

/*
 * Copy the DSP context to the supplied 32-bit NT_MIPS_DSP buffer.
 */
static int dsp32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u32 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 32-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u32 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u32));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u32);
	num_regs = count / sizeof(u32);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = (s32)dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = (s32)dspregs[i];
			break;
		}

	return 0;
}

#endif

#ifdef CONFIG_64BIT

/*
 * Copy the DSP context to the supplied 64-bit NT_MIPS_DSP buffer.
 */
static int dsp64_get(struct task_struct *target,
		     const struct user_regset *regset,
		     struct membuf to)
{
	u64 dspregs[NUM_DSP_REGS + 1];
	unsigned int i;

	BUG_ON(to.left % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	for (i = 0; i < NUM_DSP_REGS; i++)
		dspregs[i] = target->thread.dsp.dspr[i];
	dspregs[NUM_DSP_REGS] = target->thread.dsp.dspcontrol;
	return membuf_write(&to, dspregs, sizeof(dspregs));
}

/*
 * Copy the supplied 64-bit NT_MIPS_DSP buffer to the DSP context.
 */
static int dsp64_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned int start, num_regs, i;
	u64 dspregs[NUM_DSP_REGS + 1];
	int err;

	BUG_ON(count % sizeof(u64));

	if (!cpu_has_dsp)
		return -EIO;

	start = pos / sizeof(u64);
	num_regs = count / sizeof(u64);

	if (start + num_regs > NUM_DSP_REGS + 1)
		return -EIO;

	err = user_regset_copyin(&pos, &count, &kbuf, &ubuf, dspregs, 0,
				 sizeof(dspregs));
	if (err)
		return err;

	for (i = start; i < num_regs; i++)
		switch (i) {
		case 0 ... NUM_DSP_REGS - 1:
			target->thread.dsp.dspr[i] = dspregs[i];
			break;
		case NUM_DSP_REGS:
			target->thread.dsp.dspcontrol = dspregs[i];
			break;
		}

	return 0;
}

#endif

/*
 * Determine whether the DSP context is present.
 */
static int dsp_active(struct task_struct *target,
		      const struct user_regset *regset)
{
	return cpu_has_dsp ? NUM_DSP_REGS + 1 : -ENODEV;
}

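/*
 * Indices of the regsets in the mips_regsets/mips64_regsets arrays
 * defined below.  The FP and MSA entries are only present when the
 * corresponding support is configured.
 */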
enum mips_regset {
	REGSET_GPR,
	REGSET_DSP,
#ifdef CONFIG_MIPS_FP_SUPPORT
	REGSET_FPR,
	REGSET_FP_MODE,
#endif
#ifdef CONFIG_CPU_HAS_MSA
	REGSET_MSA,
#endif
};

struct pt_regs_offset {
	const char *name;
	int offset;
};

#define REG_OFFSET_NAME(reg, r) { \
	.name = #reg, \
	.offset = offsetof(struct pt_regs, r) \
}

#define REG_OFFSET_END { \
	.name = NULL, \
	.offset = 0 \
}

static const struct pt_regs_offset regoffset_table[] = {
	REG_OFFSET_NAME(r0, regs[0]),
	REG_OFFSET_NAME(r1, regs[1]),
	REG_OFFSET_NAME(r2, regs[2]),
	REG_OFFSET_NAME(r3, regs[3]),
	REG_OFFSET_NAME(r4, regs[4]),
	REG_OFFSET_NAME(r5, regs[5]),
	REG_OFFSET_NAME(r6, regs[6]),
	REG_OFFSET_NAME(r7, regs[7]),
	REG_OFFSET_NAME(r8, regs[8]),
	REG_OFFSET_NAME(r9, regs[9]),
	REG_OFFSET_NAME(r10, regs[10]),
	REG_OFFSET_NAME(r11, regs[11]),
	REG_OFFSET_NAME(r12, regs[12]),
	REG_OFFSET_NAME(r13, regs[13]),
	REG_OFFSET_NAME(r14, regs[14]),
	REG_OFFSET_NAME(r15, regs[15]),
	REG_OFFSET_NAME(r16, regs[16]),
	REG_OFFSET_NAME(r17, regs[17]),
	REG_OFFSET_NAME(r18, regs[18]),
	REG_OFFSET_NAME(r19, regs[19]),
	REG_OFFSET_NAME(r20, regs[20]),
	REG_OFFSET_NAME(r21, regs[21]),
	REG_OFFSET_NAME(r22, regs[22]),
	REG_OFFSET_NAME(r23, regs[23]),
	REG_OFFSET_NAME(r24, regs[24]),
	REG_OFFSET_NAME(r25, regs[25]),
	REG_OFFSET_NAME(r26, regs[26]),
	REG_OFFSET_NAME(r27, regs[27]),
	REG_OFFSET_NAME(r28, regs[28]),
	REG_OFFSET_NAME(r29, regs[29]),
	REG_OFFSET_NAME(r30, regs[30]),
	REG_OFFSET_NAME(r31, regs[31]),
	REG_OFFSET_NAME(c0_status, cp0_status),
	REG_OFFSET_NAME(hi, hi),
	REG_OFFSET_NAME(lo, lo),
#ifdef CONFIG_CPU_HAS_SMARTMIPS
	REG_OFFSET_NAME(acx, acx),
#endif
	REG_OFFSET_NAME(c0_badvaddr, cp0_badvaddr),
	REG_OFFSET_NAME(c0_cause, cp0_cause),
	REG_OFFSET_NAME(c0_epc, cp0_epc),
#ifdef CONFIG_CPU_CAVIUM_OCTEON
	REG_OFFSET_NAME(mpl0, mpl[0]),
	REG_OFFSET_NAME(mpl1, mpl[1]),
	REG_OFFSET_NAME(mpl2, mpl[2]),
	REG_OFFSET_NAME(mtp0, mtp[0]),
	REG_OFFSET_NAME(mtp1, mtp[1]),
	REG_OFFSET_NAME(mtp2, mtp[2]),
#endif
	REG_OFFSET_END,
};

/**
 * regs_query_register_offset() - query register offset from its name
 * @name:	the name of a register
 *
 * regs_query_register_offset() returns the offset of a register in
 * struct pt_regs from its name.  If the name is invalid, this returns
 * -EINVAL.
 */
int regs_query_register_offset(const char *name)
{
	const struct pt_regs_offset *roff;

	for (roff = regoffset_table; roff->name != NULL; roff++)
		if (!strcmp(roff->name, name))
			return roff->offset;
	return -EINVAL;
}

#if defined(CONFIG_32BIT) || defined(CONFIG_MIPS32_O32)

static const struct user_regset mips_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned int),
		.align		= sizeof(unsigned int),
		.regset_get	= gpr32_get,
		.set		= gpr32_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u32),
		.align		= sizeof(u32),
		.regset_get	= dsp32_get,
		.set		= dsp32_set,
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

static const struct user_regset_view user_mips_view = {
	.name		= "mips",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips_regsets,
	.n		= ARRAY_SIZE(mips_regsets),
};

#endif

#ifdef CONFIG_64BIT

static const struct user_regset mips64_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type	= NT_PRSTATUS,
		.n		= ELF_NGREG,
		.size		= sizeof(unsigned long),
		.align		= sizeof(unsigned long),
		.regset_get	= gpr64_get,
		.set		= gpr64_set,
	},
	[REGSET_DSP] = {
		.core_note_type	= NT_MIPS_DSP,
		.n		= NUM_DSP_REGS + 1,
		.size		= sizeof(u64),
		.align		= sizeof(u64),
		.regset_get	= dsp64_get,
		.set		= dsp64_set,
		.active		= dsp_active,
	},
#ifdef CONFIG_MIPS_FP_SUPPORT
	[REGSET_FP_MODE] = {
		.core_note_type	= NT_MIPS_FP_MODE,
		.n		= 1,
		.size		= sizeof(int),
		.align		= sizeof(int),
		.regset_get	= fp_mode_get,
		.set		= fp_mode_set,
	},
	[REGSET_FPR] = {
		.core_note_type	= NT_PRFPREG,
		.n		= ELF_NFPREG,
		.size		= sizeof(elf_fpreg_t),
		.align		= sizeof(elf_fpreg_t),
		.regset_get	= fpr_get,
		.set		= fpr_set,
	},
#endif
#ifdef CONFIG_CPU_HAS_MSA
	[REGSET_MSA] = {
		.core_note_type	= NT_MIPS_MSA,
		.n		= NUM_FPU_REGS + 1,
		.size		= 16,
		.align		= 16,
		.regset_get	= msa_get,
		.set		= msa_set,
	},
#endif
};

static const struct user_regset_view user_mips64_view = {
	.name		= "mips64",
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#ifdef CONFIG_MIPS32_N32

static const struct user_regset_view user_mipsn32_view = {
	.name		= "mipsn32",
	.e_flags	= EF_MIPS_ABI2,
	.e_machine	= ELF_ARCH,
	.ei_osabi	= ELF_OSABI,
	.regsets	= mips64_regsets,
	.n		= ARRAY_SIZE(mips64_regsets),
};

#endif

#endif /* CONFIG_64BIT */

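/*
 * Select the regset view matching the tracee's ABI: o32 tasks (or a
 * 32-bit kernel) use the 32-bit view, n32 tasks the n32 view, and
 * everything else the 64-bit view.
 */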
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_32BIT
	return &user_mips_view;
#else
#ifdef CONFIG_MIPS32_O32
	if (test_tsk_thread_flag(task, TIF_32BIT_REGS))
		return &user_mips_view;
#endif
#ifdef CONFIG_MIPS32_N32
	if (test_tsk_thread_flag(task, TIF_32BIT_ADDR))
		return &user_mipsn32_view;
#endif
	return &user_mips64_view;
#endif
}

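/*
 * Handle the MIPS-specific ptrace requests (peek/poke of the USER area,
 * the legacy GETREGS/SETREGS and FP register calls, thread area and
 * watch register access); everything else is passed to the generic
 * ptrace_request().
 */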
long arch_ptrace(struct task_struct *child, long request,
		 unsigned long addr, unsigned long data)
{
	int ret;
	void __user *addrp = (void __user *) addr;
	void __user *datavp = (void __user *) data;
	unsigned long __user *datalp = (void __user *) data;

	switch (request) {
	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_PEEKTEXT: /* read word at location addr. */
	case PTRACE_PEEKDATA:
		ret = generic_ptrace_peekdata(child, addr, data);
		break;

	/* Read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		struct pt_regs *regs;
		unsigned long tmp = 0;

		regs = task_pt_regs(child);
		ret = 0;  /* Default return value. */

		switch (addr) {
		case 0 ... 31:
			tmp = regs->regs[addr];
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs;

			if (!tsk_used_math(child)) {
				/* FP not yet used */
				tmp = -1;
				break;
			}
			fregs = get_fpu_regs(child);

#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				tmp = get_fpr32(&fregs[(addr & ~1) - FPR_BASE],
						addr & 1);
				break;
			}
#endif
			tmp = get_fpr64(&fregs[addr - FPR_BASE], 0);
			break;
		}
		case FPC_CSR:
			tmp = child->thread.fpu.fcr31;
			break;
		case FPC_EIR:
			/* implementation / version register */
			tmp = boot_cpu_data.fpu_id;
			break;
#endif
		case PC:
			tmp = regs->cp0_epc;
			break;
		case CAUSE:
			tmp = regs->cp0_cause;
			break;
		case BADVADDR:
			tmp = regs->cp0_badvaddr;
			break;
		case MMHI:
			tmp = regs->hi;
			break;
		case MMLO:
			tmp = regs->lo;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			tmp = regs->acx;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			dregs = __get_dsp_regs(child);
			tmp = dregs[addr - DSP_BASE];
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				tmp = 0;
				ret = -EIO;
				goto out;
			}
			tmp = child->thread.dsp.dspcontrol;
			break;
		default:
			tmp = 0;
			ret = -EIO;
			goto out;
		}
		ret = put_user(tmp, datalp);
		break;
	}

	/* when I and D space are separate, these will need to be fixed. */
	case PTRACE_POKETEXT: /* write the word at location addr. */
	case PTRACE_POKEDATA:
		ret = generic_ptrace_pokedata(child, addr, data);
		break;

	/* Write the word at location addr in the USER area. */
	case PTRACE_POKEUSR: {
		struct pt_regs *regs;
		ret = 0;
		regs = task_pt_regs(child);

		switch (addr) {
		case 0 ... 31:
			regs->regs[addr] = data;
			/* System call number may have been changed */
			if (addr == 2)
				mips_syscall_update_nr(child, regs);
			else if (addr == 4 &&
				 mips_syscall_is_indirect(child, regs))
				mips_syscall_update_nr(child, regs);
			break;
#ifdef CONFIG_MIPS_FP_SUPPORT
		case FPR_BASE ... FPR_BASE + 31: {
			union fpureg *fregs = get_fpu_regs(child);

			init_fp_ctx(child);
#ifdef CONFIG_32BIT
			if (test_tsk_thread_flag(child, TIF_32BIT_FPREGS)) {
				/*
				 * The odd registers are actually the high
				 * order bits of the values stored in the even
				 * registers.
				 */
				set_fpr32(&fregs[(addr & ~1) - FPR_BASE],
					  addr & 1, data);
				break;
			}
#endif
			set_fpr64(&fregs[addr - FPR_BASE], 0, data);
			break;
		}
		case FPC_CSR:
			init_fp_ctx(child);
			ptrace_setfcr31(child, data);
			break;
#endif
		case PC:
			regs->cp0_epc = data;
			break;
		case MMHI:
			regs->hi = data;
			break;
		case MMLO:
			regs->lo = data;
			break;
#ifdef CONFIG_CPU_HAS_SMARTMIPS
		case ACX:
			regs->acx = data;
			break;
#endif
		case DSP_BASE ... DSP_BASE + 5: {
			dspreg_t *dregs;

			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}

			dregs = __get_dsp_regs(child);
			dregs[addr - DSP_BASE] = data;
			break;
		}
		case DSP_CONTROL:
			if (!cpu_has_dsp) {
				ret = -EIO;
				break;
			}
			child->thread.dsp.dspcontrol = data;
			break;
		default:
			/* The rest are not allowed. */
			ret = -EIO;
			break;
		}
		break;
	}

	case PTRACE_GETREGS:
		ret = ptrace_getregs(child, datavp);
		break;

	case PTRACE_SETREGS:
		ret = ptrace_setregs(child, datavp);
		break;

#ifdef CONFIG_MIPS_FP_SUPPORT
	case PTRACE_GETFPREGS:
		ret = ptrace_getfpregs(child, datavp);
		break;

	case PTRACE_SETFPREGS:
		ret = ptrace_setfpregs(child, datavp);
		break;
#endif
	case PTRACE_GET_THREAD_AREA:
		ret = put_user(task_thread_info(child)->tp_value, datalp);
		break;

	case PTRACE_GET_WATCH_REGS:
		ret = ptrace_get_watch_regs(child, addrp);
		break;

	case PTRACE_SET_WATCH_REGS:
		ret = ptrace_set_watch_regs(child, addrp);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
 out:
	return ret;
}

/*
 * Notification of system call entry.  Returns the (possibly modified)
 * system call number, or -1 if the system call should be skipped.
 */
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
	user_exit();

	current_thread_info()->syscall = syscall;

	if (test_thread_flag(TIF_SYSCALL_TRACE)) {
		if (ptrace_report_syscall_entry(regs))
			return -1;
		syscall = current_thread_info()->syscall;
	}

#ifdef CONFIG_SECCOMP
	if (unlikely(test_thread_flag(TIF_SECCOMP))) {
		int ret, i;
		struct seccomp_data sd;
		unsigned long args[6];

		sd.nr = syscall;
		sd.arch = syscall_get_arch(current);
		syscall_get_arguments(current, regs, args);
		for (i = 0; i < 6; i++)
			sd.args[i] = args[i];
		sd.instruction_pointer = KSTK_EIP(current);

		ret = __secure_computing(&sd);
		if (ret == -1)
			return ret;
		syscall = current_thread_info()->syscall;
	}
#endif

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_enter(regs, regs->regs[2]);

	audit_syscall_entry(syscall, regs->regs[4], regs->regs[5],
			    regs->regs[6], regs->regs[7]);

	/*
	 * Negative syscall numbers are mistaken for rejected syscalls, but
	 * won't have had the return value set appropriately, so we do so now.
	 */
	if (syscall < 0)
		syscall_set_return_value(current, regs, -ENOSYS, 0);
	return syscall;
}

/*
 * Notification of system call exit: audit, trace and report to the
 * tracer after the system call has returned.
 */
asmlinkage void syscall_trace_leave(struct pt_regs *regs)
{
	/*
	 * We may come here right after calling schedule_user()
	 * or do_notify_resume(), in which case we can be in RCU
	 * user mode.
	 */
	user_exit();

	audit_syscall_exit(regs);

	if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
		trace_sys_exit(regs, regs_return_value(regs));

	if (test_thread_flag(TIF_SYSCALL_TRACE))
		ptrace_report_syscall_exit(regs, 0);

	user_enter();
}