1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
27#include <linux/ptrace.h>
28#include <linux/ratelimit.h>
29#ifdef CONFIG_PPC64
30#include <linux/syscalls.h>
31#include <linux/compat.h>
32#else
33#include <linux/wait.h>
34#include <linux/unistd.h>
35#include <linux/stddef.h>
36#include <linux/tty.h>
37#include <linux/binfmts.h>
38#endif
39
40#include <linux/uaccess.h>
41#include <asm/cacheflush.h>
42#include <asm/syscalls.h>
43#include <asm/sigcontext.h>
44#include <asm/vdso.h>
45#include <asm/switch_to.h>
46#include <asm/tm.h>
47#include <asm/asm-prototypes.h>
48#ifdef CONFIG_PPC64
49#include "ppc32.h"
50#include <asm/unistd.h>
51#else
52#include <asm/ucontext.h>
53#include <asm/pgtable.h>
54#endif
55
56#include "signal.h"
57
58
59#ifdef CONFIG_PPC64
60#define sys_rt_sigreturn compat_sys_rt_sigreturn
61#define sys_swapcontext compat_sys_swapcontext
62#define sys_sigreturn compat_sys_sigreturn
63
64#define old_sigaction old_sigaction32
65#define sigcontext sigcontext32
66#define mcontext mcontext32
67#define ucontext ucontext32
68
69#define __save_altstack __compat_save_altstack
70
71
72
73
74
75#define UCONTEXTSIZEWITHOUTVSX \
76 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
77
78
79
80
81
82
83
84
85#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
86#undef __SIGNAL_FRAMESIZE
87#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
88#undef ELF_NVRREG
89#define ELF_NVRREG ELF_NVRREG32
90
91
92
93
94
95static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
96{
97 compat_sigset_t cset;
98
99 switch (_NSIG_WORDS) {
100 case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
101 cset.sig[7] = set->sig[3] >> 32;
102 case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
103 cset.sig[5] = set->sig[2] >> 32;
104 case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
105 cset.sig[3] = set->sig[1] >> 32;
106 case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
107 cset.sig[1] = set->sig[0] >> 32;
108 }
109 return copy_to_user(uset, &cset, sizeof(*uset));
110}
111
112static inline int get_sigset_t(sigset_t *set,
113 const compat_sigset_t __user *uset)
114{
115 compat_sigset_t s32;
116
117 if (copy_from_user(&s32, uset, sizeof(*uset)))
118 return -EFAULT;
119
120
121
122
123
124 switch (_NSIG_WORDS) {
125 case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
126 case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
127 case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
128 case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
129 }
130 return 0;
131}
132
133#define to_user_ptr(p) ptr_to_compat(p)
134#define from_user_ptr(p) compat_ptr(p)
135
/*
 * Compat case: store the 64-bit pt_regs into the 32-bit user mcontext,
 * truncating each register image word to 32 bits on the way out.
 * Returns -EFAULT on a faulting user store, 0 on success.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	/* pt_regs is laid out as an array of register words (elf_greg_t64) */
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		/* skip the non-volatile GPRs (r14-r31) if they were not saved */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
152
/*
 * Compat case: restore the register image from the 32-bit user mcontext
 * into the 64-bit pt_regs, skipping MSR and SOFTE which userspace is not
 * allowed to set.  Returns -EFAULT on a faulting user load, 0 on success.
 */
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		/* MSR and SOFTE are kernel-controlled; never take them from user */
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
167
168#else
169
170#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
171
/* Native 32-bit case: the sigset layouts match, so one bulk copy suffices. */
static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}
176
/* Native 32-bit case: read the user sigset verbatim. */
static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}
181
182#define to_user_ptr(p) ((unsigned long)(p))
183#define from_user_ptr(p) ((void __user *)(p))
184
/*
 * Native 32-bit case: pt_regs and the user register image share a layout,
 * so the general registers are saved with one bulk copy.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}
191
192static inline int restore_general_regs(struct pt_regs *regs,
193 struct mcontext __user *sr)
194{
195
196 if (__copy_from_user(regs, &sr->mc_gregs,
197 PT_MSR * sizeof(elf_greg_t)))
198 return -EFAULT;
199
200 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
201 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
202 return -EFAULT;
203 return 0;
204}
205#endif
206
207
208
209
210
211
212
213
214
215
216
217
218
/*
 * Layout of the user-stack frame built for a non-RT ("old-style") signal.
 * With TM, a second sigcontext/mcontext pair holds the transactional
 * register state alongside the checkpointed one.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Space below the frame that the ABI allows functions to use
	 * before decrementing the stack pointer (register save area),
	 * so a handler cannot clobber the frame above.
	 */
	int abigap[56];
};
232
233
234#define tramp mc_pad
235
236
237
238
239
240
241
242
243
244
245
246
/*
 * Layout of the user-stack frame built for an RT signal: siginfo plus a
 * full ucontext.  On PPC64 the compat (32-bit) siginfo layout is used.
 * With TM a second ucontext carries the transactional register state.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * ABI register save area below the frame (see struct sigframe),
	 * so a handler cannot clobber the frame above.
	 */
	int abigap[56];
};
263
264#ifdef CONFIG_VSX
265unsigned long copy_fpr_to_user(void __user *to,
266 struct task_struct *task)
267{
268 u64 buf[ELF_NFPREG];
269 int i;
270
271
272 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
273 buf[i] = task->thread.TS_FPR(i);
274 buf[i] = task->thread.fp_state.fpscr;
275 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
276}
277
278unsigned long copy_fpr_from_user(struct task_struct *task,
279 void __user *from)
280{
281 u64 buf[ELF_NFPREG];
282 int i;
283
284 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
285 return 1;
286 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
287 task->thread.TS_FPR(i) = buf[i];
288 task->thread.fp_state.fpscr = buf[i];
289
290 return 0;
291}
292
293unsigned long copy_vsx_to_user(void __user *to,
294 struct task_struct *task)
295{
296 u64 buf[ELF_NVSRHALFREG];
297 int i;
298
299
300 for (i = 0; i < ELF_NVSRHALFREG; i++)
301 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
302 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
303}
304
305unsigned long copy_vsx_from_user(struct task_struct *task,
306 void __user *from)
307{
308 u64 buf[ELF_NVSRHALFREG];
309 int i;
310
311 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
312 return 1;
313 for (i = 0; i < ELF_NVSRHALFREG ; i++)
314 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
315 return 0;
316}
317
318#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
319unsigned long copy_ckfpr_to_user(void __user *to,
320 struct task_struct *task)
321{
322 u64 buf[ELF_NFPREG];
323 int i;
324
325
326 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
327 buf[i] = task->thread.TS_CKFPR(i);
328 buf[i] = task->thread.ckfp_state.fpscr;
329 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
330}
331
332unsigned long copy_ckfpr_from_user(struct task_struct *task,
333 void __user *from)
334{
335 u64 buf[ELF_NFPREG];
336 int i;
337
338 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
339 return 1;
340 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
341 task->thread.TS_CKFPR(i) = buf[i];
342 task->thread.ckfp_state.fpscr = buf[i];
343
344 return 0;
345}
346
347unsigned long copy_ckvsx_to_user(void __user *to,
348 struct task_struct *task)
349{
350 u64 buf[ELF_NVSRHALFREG];
351 int i;
352
353
354 for (i = 0; i < ELF_NVSRHALFREG; i++)
355 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
356 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
357}
358
359unsigned long copy_ckvsx_from_user(struct task_struct *task,
360 void __user *from)
361{
362 u64 buf[ELF_NVSRHALFREG];
363 int i;
364
365 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
366 return 1;
367 for (i = 0; i < ELF_NVSRHALFREG ; i++)
368 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
369 return 0;
370}
371#endif
372#else
/* Without VSX the FP state can be copied to userspace verbatim. */
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
379
/* Without VSX the FP state can be read from userspace verbatim. */
inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
			      ELF_NFPREG * sizeof(double));
}
386
387#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Without VSX the checkpointed FP state is copied to userspace verbatim. */
inline unsigned long copy_ckfpr_to_user(void __user *to,
					 struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
394
/* Without VSX the checkpointed FP state is read from userspace verbatim. */
inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
						 void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
401#endif
402#endif
403
404
405
406
407
408
409static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
410 struct mcontext __user *tm_frame, int sigret,
411 int ctx_has_vsx_region)
412{
413 unsigned long msr = regs->msr;
414
415
416 flush_fp_to_thread(current);
417
418
419 if (save_general_regs(regs, frame))
420 return 1;
421
422#ifdef CONFIG_ALTIVEC
423
424 if (current->thread.used_vr) {
425 flush_altivec_to_thread(current);
426 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
427 ELF_NVRREG * sizeof(vector128)))
428 return 1;
429
430
431 msr |= MSR_VEC;
432 }
433
434
435
436
437
438
439
440
441 if (cpu_has_feature(CPU_FTR_ALTIVEC))
442 current->thread.vrsave = mfspr(SPRN_VRSAVE);
443 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
444 return 1;
445#endif
446 if (copy_fpr_to_user(&frame->mc_fregs, current))
447 return 1;
448
449
450
451
452
453 msr &= ~MSR_VSX;
454#ifdef CONFIG_VSX
455
456
457
458
459
460
461 if (current->thread.used_vsr && ctx_has_vsx_region) {
462 flush_vsx_to_thread(current);
463 if (copy_vsx_to_user(&frame->mc_vsregs, current))
464 return 1;
465 msr |= MSR_VSX;
466 }
467#endif
468#ifdef CONFIG_SPE
469
470 if (current->thread.used_spe) {
471 flush_spe_to_thread(current);
472 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
473 ELF_NEVRREG * sizeof(u32)))
474 return 1;
475
476
477 msr |= MSR_SPE;
478 }
479
480
481
482 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
483 return 1;
484#endif
485
486 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
487 return 1;
488
489
490
491 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
492 return 1;
493
494 if (sigret) {
495
496 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
497 || __put_user(0x44000002UL, &frame->tramp[1]))
498 return 1;
499 flush_icache_range((unsigned long) &frame->tramp[0],
500 (unsigned long) &frame->tramp[2]);
501 }
502
503 return 0;
504}
505
506#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
507
508
509
510
511
512
513
514
515
516static int save_tm_user_regs(struct pt_regs *regs,
517 struct mcontext __user *frame,
518 struct mcontext __user *tm_frame, int sigret)
519{
520 unsigned long msr = regs->msr;
521
522
523
524
525
526
527 regs->msr &= ~MSR_TS_MASK;
528
529
530 if (save_general_regs(¤t->thread.ckpt_regs, frame)
531 || save_general_regs(regs, tm_frame))
532 return 1;
533
534
535
536
537
538
539
540 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
541 return 1;
542
543#ifdef CONFIG_ALTIVEC
544
545 if (current->thread.used_vr) {
546 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
547 ELF_NVRREG * sizeof(vector128)))
548 return 1;
549 if (msr & MSR_VEC) {
550 if (__copy_to_user(&tm_frame->mc_vregs,
551 ¤t->thread.vr_state,
552 ELF_NVRREG * sizeof(vector128)))
553 return 1;
554 } else {
555 if (__copy_to_user(&tm_frame->mc_vregs,
556 ¤t->thread.ckvr_state,
557 ELF_NVRREG * sizeof(vector128)))
558 return 1;
559 }
560
561
562
563
564 msr |= MSR_VEC;
565 }
566
567
568
569
570
571
572 if (cpu_has_feature(CPU_FTR_ALTIVEC))
573 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
574 if (__put_user(current->thread.ckvrsave,
575 (u32 __user *)&frame->mc_vregs[32]))
576 return 1;
577 if (msr & MSR_VEC) {
578 if (__put_user(current->thread.vrsave,
579 (u32 __user *)&tm_frame->mc_vregs[32]))
580 return 1;
581 } else {
582 if (__put_user(current->thread.ckvrsave,
583 (u32 __user *)&tm_frame->mc_vregs[32]))
584 return 1;
585 }
586#endif
587
588 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
589 return 1;
590 if (msr & MSR_FP) {
591 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
592 return 1;
593 } else {
594 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
595 return 1;
596 }
597
598#ifdef CONFIG_VSX
599
600
601
602
603
604
605 if (current->thread.used_vsr) {
606 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
607 return 1;
608 if (msr & MSR_VSX) {
609 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
610 current))
611 return 1;
612 } else {
613 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
614 return 1;
615 }
616
617 msr |= MSR_VSX;
618 }
619#endif
620#ifdef CONFIG_SPE
621
622
623
624 if (current->thread.used_spe) {
625 flush_spe_to_thread(current);
626 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
627 ELF_NEVRREG * sizeof(u32)))
628 return 1;
629
630
631 msr |= MSR_SPE;
632 }
633
634
635 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
636 return 1;
637#endif
638
639 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
640 return 1;
641 if (sigret) {
642
643 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
644 || __put_user(0x44000002UL, &frame->tramp[1]))
645 return 1;
646 flush_icache_range((unsigned long) &frame->tramp[0],
647 (unsigned long) &frame->tramp[2]);
648 }
649
650 return 0;
651}
652#endif
653
654
655
656
657
658static long restore_user_regs(struct pt_regs *regs,
659 struct mcontext __user *sr, int sig)
660{
661 long err;
662 unsigned int save_r2 = 0;
663 unsigned long msr;
664#ifdef CONFIG_VSX
665 int i;
666#endif
667
668
669
670
671
672 if (!sig)
673 save_r2 = (unsigned int)regs->gpr[2];
674 err = restore_general_regs(regs, sr);
675 regs->trap = 0;
676 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
677 if (!sig)
678 regs->gpr[2] = (unsigned long) save_r2;
679 if (err)
680 return 1;
681
682
683 if (sig)
684 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
685
686#ifdef CONFIG_ALTIVEC
687
688
689
690
691 regs->msr &= ~MSR_VEC;
692 if (msr & MSR_VEC) {
693
694 if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
695 sizeof(sr->mc_vregs)))
696 return 1;
697 current->thread.used_vr = true;
698 } else if (current->thread.used_vr)
699 memset(¤t->thread.vr_state, 0,
700 ELF_NVRREG * sizeof(vector128));
701
702
703 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
704 return 1;
705 if (cpu_has_feature(CPU_FTR_ALTIVEC))
706 mtspr(SPRN_VRSAVE, current->thread.vrsave);
707#endif
708 if (copy_fpr_from_user(current, &sr->mc_fregs))
709 return 1;
710
711#ifdef CONFIG_VSX
712
713
714
715
716 regs->msr &= ~MSR_VSX;
717 if (msr & MSR_VSX) {
718
719
720
721
722 if (copy_vsx_from_user(current, &sr->mc_vsregs))
723 return 1;
724 current->thread.used_vsr = true;
725 } else if (current->thread.used_vsr)
726 for (i = 0; i < 32 ; i++)
727 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
728#endif
729
730
731
732
733 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
734
735#ifdef CONFIG_SPE
736
737
738 regs->msr &= ~MSR_SPE;
739 if (msr & MSR_SPE) {
740
741 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
742 ELF_NEVRREG * sizeof(u32)))
743 return 1;
744 current->thread.used_spe = true;
745 } else if (current->thread.used_spe)
746 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
747
748
749 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
750 return 1;
751#endif
752
753 return 0;
754}
755
756#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
757
758
759
760
761
762static long restore_tm_user_regs(struct pt_regs *regs,
763 struct mcontext __user *sr,
764 struct mcontext __user *tm_sr)
765{
766 long err;
767 unsigned long msr, msr_hi;
768#ifdef CONFIG_VSX
769 int i;
770#endif
771
772
773
774
775
776
777
778
779 err = restore_general_regs(regs, tm_sr);
780 err |= restore_general_regs(¤t->thread.ckpt_regs, sr);
781
782 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
783
784 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
785 if (err)
786 return 1;
787
788
789 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
790
791#ifdef CONFIG_ALTIVEC
792 regs->msr &= ~MSR_VEC;
793 if (msr & MSR_VEC) {
794
795 if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
796 sizeof(sr->mc_vregs)) ||
797 __copy_from_user(¤t->thread.vr_state,
798 &tm_sr->mc_vregs,
799 sizeof(sr->mc_vregs)))
800 return 1;
801 current->thread.used_vr = true;
802 } else if (current->thread.used_vr) {
803 memset(¤t->thread.vr_state, 0,
804 ELF_NVRREG * sizeof(vector128));
805 memset(¤t->thread.ckvr_state, 0,
806 ELF_NVRREG * sizeof(vector128));
807 }
808
809
810 if (__get_user(current->thread.ckvrsave,
811 (u32 __user *)&sr->mc_vregs[32]) ||
812 __get_user(current->thread.vrsave,
813 (u32 __user *)&tm_sr->mc_vregs[32]))
814 return 1;
815 if (cpu_has_feature(CPU_FTR_ALTIVEC))
816 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
817#endif
818
819 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
820
821 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
822 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
823 return 1;
824
825#ifdef CONFIG_VSX
826 regs->msr &= ~MSR_VSX;
827 if (msr & MSR_VSX) {
828
829
830
831
832 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
833 copy_ckvsx_from_user(current, &sr->mc_vsregs))
834 return 1;
835 current->thread.used_vsr = true;
836 } else if (current->thread.used_vsr)
837 for (i = 0; i < 32 ; i++) {
838 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
839 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
840 }
841#endif
842
843#ifdef CONFIG_SPE
844
845
846
847 regs->msr &= ~MSR_SPE;
848 if (msr & MSR_SPE) {
849 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
850 ELF_NEVRREG * sizeof(u32)))
851 return 1;
852 current->thread.used_spe = true;
853 } else if (current->thread.used_spe)
854 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
855
856
857 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
858 + ELF_NEVRREG))
859 return 1;
860#endif
861
862
863 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
864 return 1;
865 msr_hi <<= 32;
866
867 if (MSR_TM_RESV(msr_hi))
868 return 1;
869
870 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
871
872
873
874
875 tm_enable();
876
877 current->thread.tm_texasr |= TEXASR_FS;
878
879 tm_recheckpoint(¤t->thread, msr);
880
881
882 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
883 if (msr & MSR_FP) {
884 load_fp_state(¤t->thread.fp_state);
885 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
886 }
887#ifdef CONFIG_ALTIVEC
888 if (msr & MSR_VEC) {
889 load_vr_state(¤t->thread.vr_state);
890 regs->msr |= MSR_VEC;
891 }
892#endif
893
894 return 0;
895}
896#endif
897
898#ifdef CONFIG_PPC64
/*
 * Translate a native siginfo into the 32-bit compat layout and copy it
 * to userspace.  Returns -EFAULT on a bad destination, otherwise the
 * OR of the individual __put_user results (nonzero on fault).
 */
int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
{
	int err;

	if (!access_ok (VERIFY_WRITE, d, sizeof(*d)))
		return -EFAULT;

	/*
	 * If you change siginfo_t structure, please be sure
	 * this code is fixed accordingly.  The union _sifields
	 * is copied field by field depending on the layout for
	 * the signal/code pair; a negative si_code means the
	 * payload is kernel-opaque and copied raw.
	 */
	err = __put_user(s->si_signo, &d->si_signo);
	err |= __put_user(s->si_errno, &d->si_errno);
	err |= __put_user(s->si_code, &d->si_code);
	if (s->si_code < 0)
		err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
				      SI_PAD_SIZE32);
	else switch(siginfo_layout(s->si_signo, s->si_code)) {
	case SIL_CHLD:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		err |= __put_user(s->si_utime, &d->si_utime);
		err |= __put_user(s->si_stime, &d->si_stime);
		err |= __put_user(s->si_status, &d->si_status);
		break;
	case SIL_FAULT:
		/* truncate the fault address to 32 bits for compat */
		err |= __put_user((unsigned int)(unsigned long)s->si_addr,
				  &d->si_addr);
		break;
	case SIL_POLL:
		err |= __put_user(s->si_band, &d->si_band);
		err |= __put_user(s->si_fd, &d->si_fd);
		break;
	case SIL_TIMER:
		err |= __put_user(s->si_tid, &d->si_tid);
		err |= __put_user(s->si_overrun, &d->si_overrun);
		err |= __put_user(s->si_int, &d->si_int);
		break;
	case SIL_SYS:
		err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
		err |= __put_user(s->si_syscall, &d->si_syscall);
		err |= __put_user(s->si_arch, &d->si_arch);
		break;
	case SIL_RT:
		err |= __put_user(s->si_int, &d->si_int);
		/* fall through: SIL_RT also carries pid/uid */
	case SIL_KILL:
		err |= __put_user(s->si_pid, &d->si_pid);
		err |= __put_user(s->si_uid, &d->si_uid);
		break;
	}
	return err;
}
956
957#define copy_siginfo_to_user copy_siginfo_to_user32
958
/*
 * Read a 32-bit compat siginfo from userspace: the three leading ints
 * (si_signo, si_errno, si_code) plus the padded union, copied raw.
 * Returns -EFAULT on a bad source pointer, 0 on success.
 */
int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
	if (copy_from_user(to, from, 3*sizeof(int)) ||
	    copy_from_user(to->_sifields._pad,
			   from->_sifields._pad, SI_PAD_SIZE32))
		return -EFAULT;

	return 0;
}
968#endif
969
970
971
972
973
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).  Returns 1 (and leaves the frame
 * unusable) on any fault, 0 on success.
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		/* return via the vDSO trampoline; no syscall stub needed */
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		/* write a li/sc trampoline into the frame itself */
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* link the transactional ucontext in via uc_link */
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler: r3..r6 are the handler args */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in the kernel's default endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
1066
/*
 * Install the blocked-signal mask and register state from @ucp.
 * @sig is nonzero on the signal-return path (passed through to
 * restore_user_regs, which then restores MSR_LE and r2).
 * Returns -EFAULT on any fault, 0 on success.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		/* compat: uc_regs holds a 32-bit pointer to the mcontext */
		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need for an access_ok here; restore_user_regs uses
		 * checked user accesses on the 32-bit pointer */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
1095
1096#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM variant of do_setcontext(): install the blocked-signal mask and
 * restore both the checkpointed (@ucp) and transactional (@tm_ucp)
 * register state.  Returns -EFAULT on any fault, 0 on success.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	/* uc_regs holds a 32-bit pointer to the mcontext in each ucontext */
	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
1123#endif
1124
1125long sys_swapcontext(struct ucontext __user *old_ctx,
1126 struct ucontext __user *new_ctx,
1127 int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
1128{
1129 unsigned char tmp;
1130 int ctx_has_vsx_region = 0;
1131
1132#ifdef CONFIG_PPC64
1133 unsigned long new_msr = 0;
1134
1135 if (new_ctx) {
1136 struct mcontext __user *mcp;
1137 u32 cmcp;
1138
1139
1140
1141
1142
1143
1144 if (__get_user(cmcp, &new_ctx->uc_regs))
1145 return -EFAULT;
1146 mcp = (struct mcontext __user *)(u64)cmcp;
1147 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1148 return -EFAULT;
1149 }
1150
1151
1152
1153
1154 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1155 return -EINVAL;
1156
1157
1158
1159
1160 if ((ctx_size < sizeof(struct ucontext)) &&
1161 (new_msr & MSR_VSX))
1162 return -EINVAL;
1163
1164 if (ctx_size >= sizeof(struct ucontext))
1165 ctx_has_vsx_region = 1;
1166#else
1167
1168
1169
1170 if (ctx_size < sizeof(struct ucontext))
1171 return -EINVAL;
1172#endif
1173 if (old_ctx != NULL) {
1174 struct mcontext __user *mctx;
1175
1176
1177
1178
1179
1180
1181
1182
1183 mctx = (struct mcontext __user *)
1184 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1185 if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1186 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1187 || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
1188 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1189 return -EFAULT;
1190 }
1191 if (new_ctx == NULL)
1192 return 0;
1193 if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
1194 || __get_user(tmp, (u8 __user *) new_ctx)
1195 || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
1196 return -EFAULT;
1197
1198
1199
1200
1201
1202
1203
1204
1205
1206
1207
1208
1209 if (do_setcontext(new_ctx, regs, 0))
1210 do_exit(SIGSEGV);
1211
1212 set_thread_flag(TIF_RESTOREALL);
1213 return 0;
1214}
1215
/*
 * rt_sigreturn syscall: restore the context saved by
 * handle_rt_signal32() (including the TM contexts if a transaction was
 * active) and the signal altstack.  On a bad frame the task gets a
 * SIGSEGV instead of an error return.
 */
long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		      struct pt_regs *regs)
{
	struct rt_sigframe __user *rt_sf;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
	int tm_restore = 0;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* the frame sits just above the back-chain word written at delivery */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If we are still in a suspended transaction at this point,
	 * reclaim it first so the thread state is coherent before we
	 * read the contexts back.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	/* a non-NULL uc_link means a transactional context was saved */
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR were stashed in the 32-bit
		 * MSR word of the transactional mcontext at delivery */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Only recheckpoint if a transaction really was
			 * active at delivery time */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore)
		/* Fall through to the non-TM restore path */
#endif
	if (do_setcontext(&rt_sf->uc, regs, 1))
		goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it here;
	 * this is historical behaviour.  A failing restore still means
	 * a bad frame.
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1306
1307#ifdef CONFIG_PPC32
/*
 * PPC32-only debug setcontext syscall: apply the requested single-step /
 * branch-trace settings from the @dbg array, then restore the register
 * state from @ctx like setcontext would.  A bad @ctx kills the task
 * with SIGSEGV rather than returning an error.
 */
int sys_debug_setcontext(struct ucontext __user *ctx,
			 int ndbg, struct sig_dbg_op __user *dbg,
			 int r6, int r7, int r8,
			 struct pt_regs *regs)
{
	struct sig_dbg_op op;
	int i;
	unsigned char tmp;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	/* first pass: validate and accumulate the requested settings */
	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* only drop MSR_DE/IDM when no other debug
				 * events remain active */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/*
	 * We wait until here to actually install the values in the
	 * registers so if we fail in the above loop, it will not
	 * affect the contents of these registers.  After this point,
	 * failure is a problem, anyway, and it's very unlikely unless
	 * the user is really doing something wrong.
	 */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	/* probe the first and last bytes of the context area */
	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
	    || __get_user(tmp, (u8 __user *) ctx)
	    || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's image
	 * of the user's registers, we can't just return -EFAULT because
	 * the user's registers may already be partially updated.  Kill
	 * the task with SIGSEGV in that situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * Restore the signal altstack from the context (historical
	 * behaviour; failures are ignored here).
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
1412#endif
1413
1414
1415
1416
/*
 * Set up a signal frame for a non-RT ("old-style") signal handler.
 * Returns 1 on any fault, 0 on success.
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		    struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

	/* the old sigcontext stores the mask in two 32-bit words */
#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		/* return via the vDSO trampoline; no syscall stub needed */
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		/* write a li/sc trampoline into the frame itself */
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* r3/r4 are the handler arguments (signal number, sigcontext) */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}
1499
1500
1501
1502
/*
 * sigreturn syscall: restore the context saved by handle_signal32()
 * (including the TM contexts if a transaction was active).  On a bad
 * frame the task gets a SIGSEGV instead of an error return.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
		       struct pt_regs *regs)
{
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	/* the transactional mcontext's MSR word holds the MSR top half;
	 * nonzero TM bits mean a transaction was active at delivery */
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1572