1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18#include <linux/kernel.h>
19#include <linux/sched.h>
20#include <linux/mm.h>
21#include <linux/smp.h>
22#include <linux/errno.h>
23#include <linux/ptrace.h>
24#include <linux/regset.h>
25#include <linux/tracehook.h>
26#include <linux/elf.h>
27#include <linux/user.h>
28#include <linux/security.h>
29#include <linux/signal.h>
30#include <linux/seccomp.h>
31#include <linux/audit.h>
32#ifdef CONFIG_PPC32
33#include <linux/module.h>
34#endif
35
36#include <asm/uaccess.h>
37#include <asm/page.h>
38#include <asm/pgtable.h>
39#include <asm/system.h>
40
41
42
43
44
45
46
47
48
/*
 * Set of MSR bits a tracer may change on behalf of a process.
 * On 4xx/Book-E parts single-stepping is driven through DBCR0/MSR_DE
 * instead (see user_enable_single_step() below), so no MSR bits are
 * tracer-writable there.
 */
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
#define MSR_DEBUGCHANGE 0
#else
#define MSR_DEBUGCHANGE (MSR_SE | MSR_BE)
#endif

/*
 * Highest pt_regs slot writable via ptrace_put_reg(); everything past
 * it (other than TRAP, which is filtered) is read-only to the tracer.
 */
#ifdef CONFIG_PPC32
#define PT_MAX_PUT_REG PT_MQ
#else
#define PT_MAX_PUT_REG PT_CCR
#endif
63
64static unsigned long get_user_msr(struct task_struct *task)
65{
66 return task->thread.regs->msr | task->thread.fpexc_mode;
67}
68
69static int set_user_msr(struct task_struct *task, unsigned long msr)
70{
71 task->thread.regs->msr &= ~MSR_DEBUGCHANGE;
72 task->thread.regs->msr |= msr & MSR_DEBUGCHANGE;
73 return 0;
74}
75
76
77
78
79
80static int set_user_trap(struct task_struct *task, unsigned long trap)
81{
82 task->thread.regs->trap = trap & 0xfff0;
83 return 0;
84}
85
86
87
88
89unsigned long ptrace_get_reg(struct task_struct *task, int regno)
90{
91 if (task->thread.regs == NULL)
92 return -EIO;
93
94 if (regno == PT_MSR)
95 return get_user_msr(task);
96
97 if (regno < (sizeof(struct pt_regs) / sizeof(unsigned long)))
98 return ((unsigned long *)task->thread.regs)[regno];
99
100 return -EIO;
101}
102
103
104
105
106int ptrace_put_reg(struct task_struct *task, int regno, unsigned long data)
107{
108 if (task->thread.regs == NULL)
109 return -EIO;
110
111 if (regno == PT_MSR)
112 return set_user_msr(task, data);
113 if (regno == PT_TRAP)
114 return set_user_trap(task, data);
115
116 if (regno <= PT_MAX_PUT_REG) {
117 ((unsigned long *)task->thread.regs)[regno] = data;
118 return 0;
119 }
120 return -EIO;
121}
122
/*
 * Regset get() for the GPR set: copies the tracee's pt_regs out to the
 * tracer, substituting get_user_msr() for the raw MSR word and
 * zero-filling anything past the end of pt_regs.
 */
static int gpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	/* No user register state (e.g. kernel thread). */
	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Everything up to (but not including) MSR is copied verbatim. */
	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  target->thread.regs,
				  0, offsetof(struct pt_regs, msr));
	if (!ret) {
		/* MSR is special: report it with fpexc_mode folded in. */
		unsigned long msr = get_user_msr(target);
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &msr,
					  offsetof(struct pt_regs, msr),
					  offsetof(struct pt_regs, msr) +
					  sizeof(msr));
	}

	/* orig_gpr3 must immediately follow msr for the copy below. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.regs->orig_gpr3,
					  offsetof(struct pt_regs, orig_gpr3),
					  sizeof(struct pt_regs));
	if (!ret)
		/* Pad any remaining space in the regset with zeroes. */
		ret = user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					       sizeof(struct pt_regs), -1);

	return ret;
}
159
/*
 * Regset set() for the GPR set: writes tracer-supplied values into the
 * tracee's pt_regs.  MSR and TRAP are filtered through set_user_msr()
 * / set_user_trap(); slots above PT_MAX_PUT_REG (other than TRAP) and
 * everything past TRAP are silently ignored.
 */
static int gpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	unsigned long reg;
	int ret;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Registers below MSR can be written directly. */
	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 target->thread.regs,
				 0, PT_MSR * sizeof(reg));

	if (!ret && count > 0) {
		/* MSR: only the MSR_DEBUGCHANGE bits may actually change. */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_MSR * sizeof(reg),
					 (PT_MSR + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_msr(target, reg);
	}

	/* orig_gpr3 must immediately follow msr for the copy below. */
	BUILD_BUG_ON(offsetof(struct pt_regs, orig_gpr3) !=
		     offsetof(struct pt_regs, msr) + sizeof(long));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.regs->orig_gpr3,
					 PT_ORIG_R3 * sizeof(reg),
					 (PT_MAX_PUT_REG + 1) * sizeof(reg));

	/* Slots between the last writable register and TRAP are ignored. */
	if (PT_MAX_PUT_REG + 1 < PT_TRAP && !ret)
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_MAX_PUT_REG + 1) * sizeof(reg),
			PT_TRAP * sizeof(reg));

	if (!ret && count > 0) {
		/* TRAP: low four bits are masked off by set_user_trap(). */
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &reg,
					 PT_TRAP * sizeof(reg),
					 (PT_TRAP + 1) * sizeof(reg));
		if (!ret)
			ret = set_user_trap(target, reg);
	}

	if (!ret)
		/* Everything past TRAP is read-only: discard it. */
		ret = user_regset_copyin_ignore(
			&pos, &count, &kbuf, &ubuf,
			(PT_TRAP + 1) * sizeof(reg), -1);

	return ret;
}
214
/*
 * Regset get() for the FP set: 32 FPRs followed by fpscr.
 */
static int fpr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* With VSX, FPRs are reached via TS_FPR(); gather the 32 FPRs
	   plus fpscr into one contiguous buffer before copying out. */
	for (i = 0; i < 32 ; i++)
		buf[i] = target->thread.TS_FPR(i);
	memcpy(&buf[32], &target->thread.fpscr, sizeof(double));
	return user_regset_copyout(&pos, &count, &kbuf, &ubuf, buf, 0, -1);

#else
	/* fpscr must sit right after the 32 FPRs so a single copy of
	   thread.fpr covers the whole regset. */
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				   &target->thread.fpr, 0, -1);
#endif
}
240
/*
 * Regset set() for the FP set: 32 FPRs followed by fpscr.
 */
static int fpr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
#ifdef CONFIG_VSX
	double buf[33];
	int i;
#endif
	flush_fp_to_thread(target);

#ifdef CONFIG_VSX
	/* Stage through a local buffer, then scatter into the TS_FPR()
	   slots; only commit once the whole copy-in succeeded. */
	i = user_regset_copyin(&pos, &count, &kbuf, &ubuf, buf, 0, -1);
	if (i)
		return i;
	for (i = 0; i < 32 ; i++)
		target->thread.TS_FPR(i) = buf[i];
	memcpy(&target->thread.fpscr, &buf[32], sizeof(double));
	return 0;
#else
	/* fpscr must sit right after the 32 FPRs so a single copy into
	   thread.fpr covers the whole regset. */
	BUILD_BUG_ON(offsetof(struct thread_struct, fpscr) !=
		     offsetof(struct thread_struct, TS_FPR(32)));

	return user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				  &target->thread.fpr, 0, -1);
#endif
}
268
269#ifdef CONFIG_ALTIVEC
270
271
272
273
274
275
276
277
278
279
280
281
282
283static int vr_active(struct task_struct *target,
284 const struct user_regset *regset)
285{
286 flush_altivec_to_thread(target);
287 return target->thread.used_vr ? regset->n : 0;
288}
289
/*
 * Regset get() for the AltiVec set: vr[0..31], vscr, then vrsave.
 */
static int vr_get(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  void *kbuf, void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* vscr must directly follow the 32 vr[] entries so both can be
	   copied out in one go. */
	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.vr, 0,
				  33 * sizeof(vector128));
	if (!ret) {
		/*
		 * Copy out only the low-order word of vrsave, padded to
		 * a full vector-register-sized slot.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf, &vrsave,
					  33 * sizeof(vector128), -1);
	}

	return ret;
}
320
/*
 * Regset set() for the AltiVec set: vr[0..31], vscr, then vrsave.
 */
static int vr_set(struct task_struct *target, const struct user_regset *regset,
		  unsigned int pos, unsigned int count,
		  const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_altivec_to_thread(target);

	/* vscr must directly follow the 32 vr[] entries so both can be
	   copied in with one call. */
	BUILD_BUG_ON(offsetof(struct thread_struct, vscr) !=
		     offsetof(struct thread_struct, vr[32]));

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.vr, 0, 33 * sizeof(vector128));
	if (!ret && count > 0) {
		/*
		 * Copy in only the low-order word of the vrsave slot;
		 * the rest of the vector-sized slot is ignored.
		 */
		union {
			elf_vrreg_t reg;
			u32 word;
		} vrsave;
		memset(&vrsave, 0, sizeof(vrsave));
		vrsave.word = target->thread.vrsave;
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf, &vrsave,
					 33 * sizeof(vector128), -1);
		if (!ret)
			target->thread.vrsave = vrsave.word;
	}

	return ret;
}
352#endif
353
354#ifdef CONFIG_VSX
355
356
357
358
359
360
361static int vsr_active(struct task_struct *target,
362 const struct user_regset *regset)
363{
364 flush_vsx_to_thread(target);
365 return target->thread.used_vsr ? regset->n : 0;
366}
367
368static int vsr_get(struct task_struct *target, const struct user_regset *regset,
369 unsigned int pos, unsigned int count,
370 void *kbuf, void __user *ubuf)
371{
372 double buf[32];
373 int ret, i;
374
375 flush_vsx_to_thread(target);
376
377 for (i = 0; i < 32 ; i++)
378 buf[i] = target->thread.fpr[i][TS_VSRLOWOFFSET];
379 ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
380 buf, 0, 32 * sizeof(double));
381
382 return ret;
383}
384
385static int vsr_set(struct task_struct *target, const struct user_regset *regset,
386 unsigned int pos, unsigned int count,
387 const void *kbuf, const void __user *ubuf)
388{
389 double buf[32];
390 int ret,i;
391
392 flush_vsx_to_thread(target);
393
394 ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
395 buf, 0, 32 * sizeof(double));
396 for (i = 0; i < 32 ; i++)
397 target->thread.fpr[i][TS_VSRLOWOFFSET] = buf[i];
398
399
400 return ret;
401}
402#endif
403
404#ifdef CONFIG_SPE
405
406
407
408
409
410
411
412
413
414
415
416static int evr_active(struct task_struct *target,
417 const struct user_regset *regset)
418{
419 flush_spe_to_thread(target);
420 return target->thread.used_spe ? regset->n : 0;
421}
422
/*
 * Regset get() for the SPE set: the evr[] array followed by acc and
 * spefscr.
 */
static int evr_get(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   void *kbuf, void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
				  &target->thread.evr,
				  0, sizeof(target->thread.evr));

	/* spefscr must directly follow the 64-bit acc field so the
	   single copy below covers both. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyout(&pos, &count, &kbuf, &ubuf,
					  &target->thread.acc,
					  sizeof(target->thread.evr), -1);

	return ret;
}
445
/*
 * Regset set() for the SPE set: the evr[] array followed by acc and
 * spefscr.
 */
static int evr_set(struct task_struct *target, const struct user_regset *regset,
		   unsigned int pos, unsigned int count,
		   const void *kbuf, const void __user *ubuf)
{
	int ret;

	flush_spe_to_thread(target);

	ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
				 &target->thread.evr,
				 0, sizeof(target->thread.evr));

	/* spefscr must directly follow the 64-bit acc field so the
	   single copy below covers both. */
	BUILD_BUG_ON(offsetof(struct thread_struct, acc) + sizeof(u64) !=
		     offsetof(struct thread_struct, spefscr));

	if (!ret)
		ret = user_regset_copyin(&pos, &count, &kbuf, &ubuf,
					 &target->thread.acc,
					 sizeof(target->thread.evr), -1);

	return ret;
}
468#endif
469
470
471
472
473
/*
 * These are our native regset flavors.  Indices double as the array
 * indices into native_regsets[] (and compat_regsets[] on ppc64).
 */
enum powerpc_regset {
	REGSET_GPR,
	REGSET_FPR,
#ifdef CONFIG_ALTIVEC
	REGSET_VMX,
#endif
#ifdef CONFIG_VSX
	REGSET_VSX,
#endif
#ifdef CONFIG_SPE
	REGSET_SPE,
#endif
};
487
488static const struct user_regset native_regsets[] = {
489 [REGSET_GPR] = {
490 .core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
491 .size = sizeof(long), .align = sizeof(long),
492 .get = gpr_get, .set = gpr_set
493 },
494 [REGSET_FPR] = {
495 .core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
496 .size = sizeof(double), .align = sizeof(double),
497 .get = fpr_get, .set = fpr_set
498 },
499#ifdef CONFIG_ALTIVEC
500 [REGSET_VMX] = {
501 .core_note_type = NT_PPC_VMX, .n = 34,
502 .size = sizeof(vector128), .align = sizeof(vector128),
503 .active = vr_active, .get = vr_get, .set = vr_set
504 },
505#endif
506#ifdef CONFIG_VSX
507 [REGSET_VSX] = {
508 .core_note_type = NT_PPC_VSX, .n = 32,
509 .size = sizeof(double), .align = sizeof(double),
510 .active = vsr_active, .get = vsr_get, .set = vsr_set
511 },
512#endif
513#ifdef CONFIG_SPE
514 [REGSET_SPE] = {
515 .n = 35,
516 .size = sizeof(u32), .align = sizeof(u32),
517 .active = evr_active, .get = evr_get, .set = evr_set
518 },
519#endif
520};
521
/* The regset view exported for natively-sized (same-width) tracers. */
static const struct user_regset_view user_ppc_native_view = {
	.name = UTS_MACHINE, .e_machine = ELF_ARCH, .ei_osabi = ELF_OSABI,
	.regsets = native_regsets, .n = ARRAY_SIZE(native_regsets)
};
526
527#ifdef CONFIG_PPC64
528#include <linux/compat.h>
529
/*
 * GPR regset get() for a 32-bit tracee on a 64-bit kernel: each
 * 64-bit register is copied out truncated to a compat_ulong_t, with
 * MSR synthesized via get_user_msr().
 */
static int gpr32_get(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     void *kbuf, void __user *ubuf)
{
	const unsigned long *regs = &target->thread.regs->gpr[0];
	compat_ulong_t *k = kbuf;
	compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Work in units of 32-bit registers from here on. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers below MSR are copied out verbatim (low 32 bits). */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_MSR; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* MSR is reported with fpexc_mode folded in. */
	if (count > 0 && pos == PT_MSR) {
		reg = get_user_msr(target);
		if (kbuf)
			*k++ = reg;
		else if (__put_user(reg, u++))
			return -EFAULT;
		++pos;
		--count;
	}

	if (kbuf)
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			*k++ = regs[pos++];
	else
		for (; count > 0 && pos < PT_REGS_COUNT; --count)
			if (__put_user((compat_ulong_t) regs[pos++], u++))
				return -EFAULT;

	/* Convert back to byte units and zero-fill the remainder. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyout_zero(&pos, &count, &kbuf, &ubuf,
					PT_REGS_COUNT * sizeof(reg), -1);
}
581
/*
 * GPR regset set() for a 32-bit tracee on a 64-bit kernel: each
 * compat_ulong_t value is widened into the 64-bit register slot, with
 * MSR and TRAP filtered through their dedicated setters.
 */
static int gpr32_set(struct task_struct *target,
		     const struct user_regset *regset,
		     unsigned int pos, unsigned int count,
		     const void *kbuf, const void __user *ubuf)
{
	unsigned long *regs = &target->thread.regs->gpr[0];
	const compat_ulong_t *k = kbuf;
	const compat_ulong_t __user *u = ubuf;
	compat_ulong_t reg;

	if (target->thread.regs == NULL)
		return -EIO;

	CHECK_FULL_REGS(target->thread.regs);

	/* Work in units of 32-bit registers from here on. */
	pos /= sizeof(reg);
	count /= sizeof(reg);

	/* Registers below MSR are written directly. */
	if (kbuf)
		for (; count > 0 && pos < PT_MSR; --count)
			regs[pos++] = *k++;
	else
		for (; count > 0 && pos < PT_MSR; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}

	/* MSR: only the MSR_DEBUGCHANGE bits may actually change. */
	if (count > 0 && pos == PT_MSR) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_msr(target, reg);
		++pos;
		--count;
	}

	if (kbuf) {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count)
			regs[pos++] = *k++;
		/* Slots above the last writable register are skipped. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			++k;
	} else {
		for (; count > 0 && pos <= PT_MAX_PUT_REG; --count) {
			if (__get_user(reg, u++))
				return -EFAULT;
			regs[pos++] = reg;
		}
		/* Skipped slots are still read so faults are detected. */
		for (; count > 0 && pos < PT_TRAP; --count, ++pos)
			if (__get_user(reg, u++))
				return -EFAULT;
	}

	/* TRAP: low four bits are masked off by set_user_trap(). */
	if (count > 0 && pos == PT_TRAP) {
		if (kbuf)
			reg = *k++;
		else if (__get_user(reg, u++))
			return -EFAULT;
		set_user_trap(target, reg);
		++pos;
		--count;
	}

	/* Everything past TRAP is read-only: discard the remainder. */
	kbuf = k;
	ubuf = u;
	pos *= sizeof(reg);
	count *= sizeof(reg);
	return user_regset_copyin_ignore(&pos, &count, &kbuf, &ubuf,
					 (PT_TRAP + 1) * sizeof(reg), -1);
}
654
655
656
657
/*
 * Regset flavors matching the 32-bit layout, used for 32-bit tracees
 * on a 64-bit kernel.  GPRs go through the gpr32_* converters; the
 * FP/VMX/SPE handlers are shared with the native view.
 */
static const struct user_regset compat_regsets[] = {
	[REGSET_GPR] = {
		.core_note_type = NT_PRSTATUS, .n = ELF_NGREG,
		.size = sizeof(compat_long_t), .align = sizeof(compat_long_t),
		.get = gpr32_get, .set = gpr32_set
	},
	[REGSET_FPR] = {
		.core_note_type = NT_PRFPREG, .n = ELF_NFPREG,
		.size = sizeof(double), .align = sizeof(double),
		.get = fpr_get, .set = fpr_set
	},
#ifdef CONFIG_ALTIVEC
	[REGSET_VMX] = {
		.core_note_type = NT_PPC_VMX, .n = 34,
		.size = sizeof(vector128), .align = sizeof(vector128),
		.active = vr_active, .get = vr_get, .set = vr_set
	},
#endif
#ifdef CONFIG_SPE
	[REGSET_SPE] = {
		.core_note_type = NT_PPC_SPE, .n = 35,
		.size = sizeof(u32), .align = sizeof(u32),
		.active = evr_active, .get = evr_get, .set = evr_set
	},
#endif
};
684
/* The regset view exported for 32-bit tracers of a 64-bit kernel. */
static const struct user_regset_view user_ppc_compat_view = {
	.name = "ppc", .e_machine = EM_PPC, .ei_osabi = ELF_OSABI,
	.regsets = compat_regsets, .n = ARRAY_SIZE(compat_regsets)
};
689#endif
690
/*
 * Select the regset view matching the tracee: the 32-bit compat view
 * for 32-bit tasks on ppc64, the native view otherwise.
 */
const struct user_regset_view *task_user_regset_view(struct task_struct *task)
{
#ifdef CONFIG_PPC64
	if (test_tsk_thread_flag(task, TIF_32BIT))
		return &user_ppc_compat_view;
#endif
	return &user_ppc_native_view;
}
699
700
/*
 * Arrange for the task to trap after each instruction: on 4xx/Book-E
 * via the DBCR0 instruction-complete event plus MSR_DE, elsewhere via
 * the MSR single-step (SE) bit.  Branch-step mode is cleared first.
 */
void user_enable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
		task->thread.dbcr0 &= ~DBCR0_BT;
		task->thread.dbcr0 |= DBCR0_IDM | DBCR0_IC;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_BE;
		regs->msr |= MSR_SE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
717
/*
 * Arrange for the task to trap on taken branches ("block step"): on
 * 4xx/Book-E via the DBCR0 branch-taken event plus MSR_DE, elsewhere
 * via the MSR branch-trace (BE) bit.
 */
void user_enable_block_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
		task->thread.dbcr0 &= ~DBCR0_IC;
		/*
		 * NOTE(review): this plain assignment makes the
		 * preceding "&= ~DBCR0_IC" redundant and discards any
		 * other dbcr0 bits (e.g. DAC events set elsewhere);
		 * confirm whether "|=" was intended here.
		 */
		task->thread.dbcr0 = DBCR0_IDM | DBCR0_BT;
		regs->msr |= MSR_DE;
#else
		regs->msr &= ~MSR_SE;
		regs->msr |= MSR_BE;
#endif
	}
	set_tsk_thread_flag(task, TIF_SINGLESTEP);
}
734
/*
 * Turn off single-step and branch-step for the task, undoing
 * user_enable_single_step()/user_enable_block_step().
 */
void user_disable_single_step(struct task_struct *task)
{
	struct pt_regs *regs = task->thread.regs;

	if (regs != NULL) {
#if defined(CONFIG_BOOKE)
		/* If a DAC breakpoint is still installed, keep IDM and
		   MSR_DE alive so it continues to fire; clear only the
		   stepping events. */
		if (task->thread.dabr)
			task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT);
		else {
			task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
			regs->msr &= ~MSR_DE;
		}
#elif defined(CONFIG_40x)
		task->thread.dbcr0 &= ~(DBCR0_IC | DBCR0_BT | DBCR0_IDM);
		regs->msr &= ~MSR_DE;
#else
		regs->msr &= ~(MSR_SE | MSR_BE);
#endif
	}
	clear_tsk_thread_flag(task, TIF_SINGLESTEP);
}
757
/*
 * Install (or clear, when @data == 0) the task's data-address
 * breakpoint from a PTRACE_SET_DEBUGREG request.  The low bits of
 * @data carry mode flags; the rest is the address.
 */
int ptrace_set_debugreg(struct task_struct *task, unsigned long addr,
			unsigned long data)
{
	/* Only one debug register is supported, so only slot 0 is valid. */
	if (addr > 0)
		return -EINVAL;

	/* The bottom 3 bits of data are flags; the address proper must
	   lie within the user address space. */
	if ((data & ~0x7UL) >= TASK_SIZE)
		return -EIO;

#ifndef CONFIG_BOOKE
	/* A non-zero value must have the translation flag set. */
	if (data && !(data & DABR_TRANSLATION))
		return -EIO;

	/* Store the whole value (address plus flags) as the DABR. */
	task->thread.dabr = data;

#endif
#if defined(CONFIG_BOOKE)
	/* Book-E DACs hold a bare address: keep only the address bits
	   and use bits 0-1 purely as read/write mode flags below. */
	task->thread.dabr = data & ~0x3UL;

	/* Clearing the breakpoint also tears down the debug state. */
	if (task->thread.dabr == 0) {
		task->thread.dbcr0 &= ~(DBSR_DAC1R | DBSR_DAC1W | DBCR0_IDM);
		task->thread.regs->msr &= ~MSR_DE;
		return 0;
	}

	/* A non-zero breakpoint needs at least one of read/write set. */
	if (!(data & 0x3UL))
		return -EINVAL;

	/* Enable internal debug mode; note this overwrites any other
	   dbcr0 bits currently set. */
	task->thread.dbcr0 = DBCR0_IDM;

	/* Translate the read (0x1) / write (0x2) flags into the DAC1
	   event-enable bits. */
	if (data & 0x1UL)
		task->thread.dbcr0 |= DBSR_DAC1R;
	if (data & 0x2UL)
		task->thread.dbcr0 |= DBSR_DAC1W;

	task->thread.regs->msr |= MSR_DE;
#endif
	return 0;
}
830
831
832
833
834
835
/*
 * Called by kernel/ptrace.c when detaching from a tracee: make sure
 * the single-step bits etc. are not left set.
 */
void ptrace_disable(struct task_struct *child)
{
	/* make sure the single step bit is not set. */
	user_disable_single_step(child);
}
841
842
843
844
845
/*
 * Handle the legacy powerpc-specific PPC_PTRACE_{GET,SET}{,FP}REGS
 * requests via the generic regset copy helpers.  Only the 32 GPRs /
 * 32 FPRs are transferred; @data is the user buffer address.
 */
static long arch_ptrace_old(struct task_struct *child, long request, long addr,
			    long data)
{
	switch (request) {
	case PPC_PTRACE_GETREGS:	/* Get GPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR, 0, 32 * sizeof(long),
					   (void __user *) data);

	case PPC_PTRACE_SETREGS:	/* Set GPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR, 0, 32 * sizeof(long),
					     (const void __user *) data);

	case PPC_PTRACE_GETFPREGS:	/* Get FPRs 0 - 31. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR, 0, 32 * sizeof(double),
					   (void __user *) data);

	case PPC_PTRACE_SETFPREGS:	/* Set FPRs 0 - 31. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR, 0, 32 * sizeof(double),
					     (const void __user *) data);
	}

	return -EPERM;
}
873
/*
 * Arch-specific ptrace() request dispatcher.  Register-set requests
 * are routed through the regset machinery; everything unrecognised
 * falls through to the generic ptrace_request().
 */
long arch_ptrace(struct task_struct *child, long request, long addr, long data)
{
	int ret = -EPERM;

	switch (request) {
	/* read the word at location addr in the USER area. */
	case PTRACE_PEEKUSR: {
		unsigned long index, tmp;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			tmp = ptrace_get_reg(child, (int) index);
		} else {
			/* FP registers live in thread.fpr, spaced by
			   TS_FPRWIDTH words per register. */
			flush_fp_to_thread(child);
			tmp = ((unsigned long *)child->thread.fpr)
				[TS_FPRWIDTH * (index - PT_FPR0)];
		}
		ret = put_user(tmp,(unsigned long __user *) data);
		break;
	}

	/* write the word at location addr in the USER area */
	case PTRACE_POKEUSR: {
		unsigned long index;

		ret = -EIO;
		/* convert to index and check */
#ifdef CONFIG_PPC32
		index = (unsigned long) addr >> 2;
		if ((addr & 3) || (index > PT_FPSCR)
		    || (child->thread.regs == NULL))
#else
		index = (unsigned long) addr >> 3;
		if ((addr & 7) || (index > PT_FPSCR))
#endif
			break;

		CHECK_FULL_REGS(child->thread.regs);
		if (index < PT_FPR0) {
			ret = ptrace_put_reg(child, index, data);
		} else {
			flush_fp_to_thread(child);
			((unsigned long *)child->thread.fpr)
				[TS_FPRWIDTH * (index - PT_FPR0)] = data;
			ret = 0;
		}
		break;
	}

	case PTRACE_GET_DEBUGREG: {
		ret = -EINVAL;
		/* Only one debug register, so only slot 0 is valid. */
		if (addr > 0)
			break;
		ret = put_user(child->thread.dabr,
			       (unsigned long __user *)data);
		break;
	}

	case PTRACE_SET_DEBUGREG:
		ret = ptrace_set_debugreg(child, addr, data);
		break;

#ifdef CONFIG_PPC64
	case PTRACE_GETREGS64:
#endif
	case PTRACE_GETREGS:	/* Get all pt_regs from the child. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_GPR,
					   0, sizeof(struct pt_regs),
					   (void __user *) data);

#ifdef CONFIG_PPC64
	case PTRACE_SETREGS64:
#endif
	case PTRACE_SETREGS:	/* Set all gp regs in the child. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_GPR,
					     0, sizeof(struct pt_regs),
					     (const void __user *) data);

	case PTRACE_GETFPREGS: /* Get the child FPU state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_FPR,
					   0, sizeof(elf_fpregset_t),
					   (void __user *) data);

	case PTRACE_SETFPREGS: /* Set the child FPU state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_FPR,
					     0, sizeof(elf_fpregset_t),
					     (const void __user *) data);

#ifdef CONFIG_ALTIVEC
	case PTRACE_GETVRREGS:	/* 33 vectors plus the 32-bit vrsave word. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VMX,
					   0, (33 * sizeof(vector128) +
					       sizeof(u32)),
					   (void __user *) data);

	case PTRACE_SETVRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VMX,
					     0, (33 * sizeof(vector128) +
						 sizeof(u32)),
					     (const void __user *) data);
#endif
#ifdef CONFIG_VSX
	case PTRACE_GETVSRREGS:	/* Doubleword-1 halves of the 32 VSRs. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_VSX,
					   0, 32 * sizeof(double),
					   (void __user *) data);

	case PTRACE_SETVSRREGS:
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_VSX,
					     0, 32 * sizeof(double),
					     (const void __user *) data);
#endif
#ifdef CONFIG_SPE
	case PTRACE_GETEVRREGS:
		/* Get the child spe register state. */
		return copy_regset_to_user(child, &user_ppc_native_view,
					   REGSET_SPE, 0, 35 * sizeof(u32),
					   (void __user *) data);

	case PTRACE_SETEVRREGS:
		/* Set the child spe register state. */
		return copy_regset_from_user(child, &user_ppc_native_view,
					     REGSET_SPE, 0, 35 * sizeof(u32),
					     (const void __user *) data);
#endif

	/* Legacy powerpc-specific regs calls; see arch_ptrace_old(). */
	case PPC_PTRACE_GETREGS:
	case PPC_PTRACE_SETREGS:
	case PPC_PTRACE_GETFPREGS:
	case PPC_PTRACE_SETFPREGS:
		ret = arch_ptrace_old(child, request, addr, data);
		break;

	default:
		ret = ptrace_request(child, request, addr, data);
		break;
	}
	return ret;
}
1035
1036
1037
1038
1039
/*
 * Syscall entry tracing hook: runs seccomp, the tracehook entry
 * report, and audit.  Returns the syscall number to execute, or -1 to
 * skip the syscall when the tracer aborted it.
 */
long do_syscall_trace_enter(struct pt_regs *regs)
{
	long ret = 0;

	secure_computing(regs->gpr[0]);

	if (test_thread_flag(TIF_SYSCALL_TRACE) &&
	    tracehook_report_syscall_entry(regs))
		/*
		 * The tracer decided to abort the syscall.  Note the
		 * tracer may also simply have changed regs->gpr[0] to
		 * an invalid syscall number instead.
		 */
		ret = -1L;

	if (unlikely(current->audit_context)) {
#ifdef CONFIG_PPC64
		if (!test_thread_flag(TIF_32BIT))
			audit_syscall_entry(AUDIT_ARCH_PPC64,
					    regs->gpr[0],
					    regs->gpr[3], regs->gpr[4],
					    regs->gpr[5], regs->gpr[6]);
		else
#endif
			/* 32-bit task: report only the low words of the
			   argument registers. */
			audit_syscall_entry(AUDIT_ARCH_PPC,
					    regs->gpr[0],
					    regs->gpr[3] & 0xffffffff,
					    regs->gpr[4] & 0xffffffff,
					    regs->gpr[5] & 0xffffffff,
					    regs->gpr[6] & 0xffffffff);
	}

	return ret ?: regs->gpr[0];
}
1074
1075void do_syscall_trace_leave(struct pt_regs *regs)
1076{
1077 int step;
1078
1079 if (unlikely(current->audit_context))
1080 audit_syscall_exit((regs->ccr&0x10000000)?AUDITSC_FAILURE:AUDITSC_SUCCESS,
1081 regs->result);
1082
1083 step = test_thread_flag(TIF_SINGLESTEP);
1084 if (step || test_thread_flag(TIF_SYSCALL_TRACE))
1085 tracehook_report_syscall_exit(regs, step);
1086}
1087