1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
27#include <linux/ptrace.h>
28#include <linux/pagemap.h>
29#include <linux/ratelimit.h>
30#include <linux/syscalls.h>
31#ifdef CONFIG_PPC64
32#include <linux/compat.h>
33#else
34#include <linux/wait.h>
35#include <linux/unistd.h>
36#include <linux/stddef.h>
37#include <linux/tty.h>
38#include <linux/binfmts.h>
39#endif
40
41#include <linux/uaccess.h>
42#include <asm/cacheflush.h>
43#include <asm/syscalls.h>
44#include <asm/sigcontext.h>
45#include <asm/vdso.h>
46#include <asm/switch_to.h>
47#include <asm/tm.h>
48#include <asm/asm-prototypes.h>
49#ifdef CONFIG_PPC64
50#include "ppc32.h"
51#include <asm/unistd.h>
52#else
53#include <asm/ucontext.h>
54#include <asm/pgtable.h>
55#endif
56
57#include "signal.h"
58
59
#ifdef CONFIG_PPC64
/*
 * On 64-bit kernels this file provides the 32-bit compat signal paths:
 * alias the generic structure names to their 32-bit counterparts so the
 * code below can be written once for both configurations.
 */
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Size of the 32-bit ucontext before VSX state was appended (i.e. with
 * VMX but without VSX).  Used by swapcontext() to validate the
 * caller-supplied ctx_size.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/*
 * General-register copy size and 32-bit frame/vector-register constants
 * for the compat path.
 */
#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32
91
92static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
93{
94 return put_compat_sigset(uset, set, sizeof(*uset));
95}
96
97static inline int get_sigset_t(sigset_t *set,
98 const compat_sigset_t __user *uset)
99{
100 return get_compat_sigset(set, uset);
101}
102
/* Compat user pointers are 32-bit values inside a 64-bit kernel. */
#define to_user_ptr(p)		ptr_to_compat(p)
#define from_user_ptr(p)	compat_ptr(p)
105
/*
 * Save the general purpose registers into a 32-bit (compat) mcontext on
 * the user stack.  Each 64-bit pt_regs slot is truncated to 32 bits on
 * the way out; SOFTE is overridden so userspace always sees interrupts
 * enabled.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	/* Force userspace to always see softe as 1 (interrupts enabled) */
	elf_greg_t64 softe = 0x1;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		/* skip the non-volatile register range if regs are partial
		 * (should not happen after the WARN_ON above) */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if ( i == PT_SOFTE) {
			if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
				return -EFAULT;
			else
				continue;
		}
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
130
/*
 * Restore the general purpose registers from a 32-bit (compat) mcontext,
 * skipping the privileged MSR and SOFTE slots (those are handled, or
 * deliberately ignored, by the callers).
 */
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
145
146#else
147
/* Copy no more than the smaller of the ELF gregset and pt_regs. */
#define GP_REGS_SIZE	min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
149
150static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
151{
152 return copy_to_user(uset, set, sizeof(*uset));
153}
154
155static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
156{
157 return copy_from_user(set, uset, sizeof(*uset));
158}
159
/* Native 32-bit kernel: user pointers round-trip through unsigned long. */
#define to_user_ptr(p)		((unsigned long)(p))
#define from_user_ptr(p)	((void __user *)(p))
162
163static inline int save_general_regs(struct pt_regs *regs,
164 struct mcontext __user *frame)
165{
166 WARN_ON(!FULL_REGS(regs));
167 return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
168}
169
170static inline int restore_general_regs(struct pt_regs *regs,
171 struct mcontext __user *sr)
172{
173
174 if (__copy_from_user(regs, &sr->mc_gregs,
175 PT_MSR * sizeof(elf_greg_t)))
176 return -EFAULT;
177
178 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
179 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
180 return -EFAULT;
181 return 0;
182}
183#endif
184
185
186
187
188
189
190
191
192
193
194
195
196
/*
 * Layout of the frame we put on the user stack for a non-RT signal:
 * a sigcontext, the register state (mcontext), optionally the
 * transactional state, and an ABI gap so the handler may store below
 * its stack pointer.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext	mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext	mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
210
211
/*
 * The mc_pad field of the mcontext holds the two-instruction sigreturn
 * trampoline written when the vDSO trampoline is not available.
 */
#define tramp	mc_pad
213
214
215
216
217
218
219
220
221
222
223
224
/*
 * Layout of the frame we put on the user stack for an RT signal:
 * the siginfo, a ucontext (plus a second, transactional ucontext when
 * TM is configured), and an ABI gap so the handler may store below its
 * stack pointer.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext	uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext	uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int			abigap[56];
};
241
242#ifdef CONFIG_VSX
243unsigned long copy_fpr_to_user(void __user *to,
244 struct task_struct *task)
245{
246 u64 buf[ELF_NFPREG];
247 int i;
248
249
250 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
251 buf[i] = task->thread.TS_FPR(i);
252 buf[i] = task->thread.fp_state.fpscr;
253 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
254}
255
256unsigned long copy_fpr_from_user(struct task_struct *task,
257 void __user *from)
258{
259 u64 buf[ELF_NFPREG];
260 int i;
261
262 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
263 return 1;
264 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
265 task->thread.TS_FPR(i) = buf[i];
266 task->thread.fp_state.fpscr = buf[i];
267
268 return 0;
269}
270
271unsigned long copy_vsx_to_user(void __user *to,
272 struct task_struct *task)
273{
274 u64 buf[ELF_NVSRHALFREG];
275 int i;
276
277
278 for (i = 0; i < ELF_NVSRHALFREG; i++)
279 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
280 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
281}
282
283unsigned long copy_vsx_from_user(struct task_struct *task,
284 void __user *from)
285{
286 u64 buf[ELF_NVSRHALFREG];
287 int i;
288
289 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
290 return 1;
291 for (i = 0; i < ELF_NVSRHALFREG ; i++)
292 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
293 return 0;
294}
295
296#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
297unsigned long copy_ckfpr_to_user(void __user *to,
298 struct task_struct *task)
299{
300 u64 buf[ELF_NFPREG];
301 int i;
302
303
304 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
305 buf[i] = task->thread.TS_CKFPR(i);
306 buf[i] = task->thread.ckfp_state.fpscr;
307 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
308}
309
310unsigned long copy_ckfpr_from_user(struct task_struct *task,
311 void __user *from)
312{
313 u64 buf[ELF_NFPREG];
314 int i;
315
316 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
317 return 1;
318 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
319 task->thread.TS_CKFPR(i) = buf[i];
320 task->thread.ckfp_state.fpscr = buf[i];
321
322 return 0;
323}
324
325unsigned long copy_ckvsx_to_user(void __user *to,
326 struct task_struct *task)
327{
328 u64 buf[ELF_NVSRHALFREG];
329 int i;
330
331
332 for (i = 0; i < ELF_NVSRHALFREG; i++)
333 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
334 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
335}
336
337unsigned long copy_ckvsx_from_user(struct task_struct *task,
338 void __user *from)
339{
340 u64 buf[ELF_NVSRHALFREG];
341 int i;
342
343 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
344 return 1;
345 for (i = 0; i < ELF_NVSRHALFREG ; i++)
346 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
347 return 0;
348}
349#endif
350#else
351inline unsigned long copy_fpr_to_user(void __user *to,
352 struct task_struct *task)
353{
354 return __copy_to_user(to, task->thread.fp_state.fpr,
355 ELF_NFPREG * sizeof(double));
356}
357
358inline unsigned long copy_fpr_from_user(struct task_struct *task,
359 void __user *from)
360{
361 return __copy_from_user(task->thread.fp_state.fpr, from,
362 ELF_NFPREG * sizeof(double));
363}
364
365#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
366inline unsigned long copy_ckfpr_to_user(void __user *to,
367 struct task_struct *task)
368{
369 return __copy_to_user(to, task->thread.ckfp_state.fpr,
370 ELF_NFPREG * sizeof(double));
371}
372
373inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
374 void __user *from)
375{
376 return __copy_from_user(task->thread.ckfp_state.fpr, from,
377 ELF_NFPREG * sizeof(double));
378}
379#endif
380#endif
381
382
383
384
385
386
387static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
388 struct mcontext __user *tm_frame, int sigret,
389 int ctx_has_vsx_region)
390{
391 unsigned long msr = regs->msr;
392
393
394 flush_fp_to_thread(current);
395
396
397 if (save_general_regs(regs, frame))
398 return 1;
399
400#ifdef CONFIG_ALTIVEC
401
402 if (current->thread.used_vr) {
403 flush_altivec_to_thread(current);
404 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
405 ELF_NVRREG * sizeof(vector128)))
406 return 1;
407
408
409 msr |= MSR_VEC;
410 }
411
412
413
414
415
416
417
418
419 if (cpu_has_feature(CPU_FTR_ALTIVEC))
420 current->thread.vrsave = mfspr(SPRN_VRSAVE);
421 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
422 return 1;
423#endif
424 if (copy_fpr_to_user(&frame->mc_fregs, current))
425 return 1;
426
427
428
429
430
431 msr &= ~MSR_VSX;
432#ifdef CONFIG_VSX
433
434
435
436
437
438
439 if (current->thread.used_vsr && ctx_has_vsx_region) {
440 flush_vsx_to_thread(current);
441 if (copy_vsx_to_user(&frame->mc_vsregs, current))
442 return 1;
443 msr |= MSR_VSX;
444 }
445#endif
446#ifdef CONFIG_SPE
447
448 if (current->thread.used_spe) {
449 flush_spe_to_thread(current);
450 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
451 ELF_NEVRREG * sizeof(u32)))
452 return 1;
453
454
455 msr |= MSR_SPE;
456 }
457
458
459
460 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
461 return 1;
462#endif
463
464 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
465 return 1;
466
467
468
469 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
470 return 1;
471
472 if (sigret) {
473
474 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
475 || __put_user(0x44000002UL, &frame->tramp[1]))
476 return 1;
477 flush_icache_range((unsigned long) &frame->tramp[0],
478 (unsigned long) &frame->tramp[2]);
479 }
480
481 return 0;
482}
483
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.  In addition to
 * the normal (checkpointed) state in @frame, the live transactional
 * state is saved into a second mcontext, @tm_frame.
 *
 * Fix: "&current" had been mangled into the "¤" HTML entity
 * (mojibake) at four sites below; restored the original address-of
 * expressions.
 */
static int save_tm_user_regs(struct pt_regs *regs,
			     struct mcontext __user *frame,
			     struct mcontext __user *tm_frame, int sigret)
{
	unsigned long msr = regs->msr;

	WARN_ON(tm_suspend_disabled);

	/* Remove TM bits from thread's MSR.  The MSR in the sigcontext
	 * just indicates to userland that we were doing a transaction, but we
	 * don't want return to user code to try restarting any of the
	 * checkpointed state!
	 */
	regs->msr &= ~MSR_TS_MASK;

	/* Save both sets of general registers */
	if (save_general_regs(&current->thread.ckpt_regs, frame)
	    || save_general_regs(regs, tm_frame))
		return 1;

	/* Stash the top half of the 64bit MSR into the 32bit MSR word
	 * of the transactional mcontext.  This way we have a backward-compatible
	 * MSR in the 'normal' (checkpointed) mcontext and additionally one can
	 * also look at what type of transaction (T or S) was active at the
	 * time of the signal.
	 */
	if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
		return 1;

#ifdef CONFIG_ALTIVEC
	/* save altivec registers */
	if (current->thread.used_vr) {
		if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
				   ELF_NVRREG * sizeof(vector128)))
			return 1;
		if (msr & MSR_VEC) {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.vr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		} else {
			if (__copy_to_user(&tm_frame->mc_vregs,
					   &current->thread.ckvr_state,
					   ELF_NVRREG * sizeof(vector128)))
				return 1;
		}

		/* set MSR_VEC in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data
		 */
		msr |= MSR_VEC;
	}

	/* We always copy to/from vrsave, it's 0 if we don't have or don't
	 * use altivec. Since VSCR only contains 32 bits saved in the least
	 * significant bits of a vector, we "cheat" and stuff VRSAVE in the
	 * most significant bits of that same vector. --BenH
	 */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
	if (__put_user(current->thread.ckvrsave,
		       (u32 __user *)&frame->mc_vregs[32]))
		return 1;
	if (msr & MSR_VEC) {
		if (__put_user(current->thread.vrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	} else {
		if (__put_user(current->thread.ckvrsave,
			       (u32 __user *)&tm_frame->mc_vregs[32]))
			return 1;
	}
#endif /* CONFIG_ALTIVEC */

	if (copy_ckfpr_to_user(&frame->mc_fregs, current))
		return 1;
	if (msr & MSR_FP) {
		if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	} else {
		if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
			return 1;
	}

#ifdef CONFIG_VSX
	/*
	 * Copy VSR 0-31 upper half from thread_struct to local
	 * buffer, then write that to userspace.  Also set MSR_VSX in
	 * the saved MSR value to indicate that frame->mc_vregs
	 * contains valid data
	 */
	if (current->thread.used_vsr) {
		if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
			return 1;
		if (msr & MSR_VSX) {
			if (copy_vsx_to_user(&tm_frame->mc_vsregs,
						      current))
				return 1;
		} else {
			if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
				return 1;
		}

		msr |= MSR_VSX;
	}
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in save_user_regs().
	 */
	if (current->thread.used_spe) {
		flush_spe_to_thread(current);
		if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
				   ELF_NEVRREG * sizeof(u32)))
			return 1;
		/* set MSR_SPE in the saved MSR value to indicate that
		 * frame->mc_vregs contains valid data */
		msr |= MSR_SPE;
	}

	/* We always copy to/from spefscr */
	if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
		return 1;
	if (sigret) {
		/* Set up the sigreturn trampoline: li 0,sigret; sc */
		if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
		    || __put_user(0x44000002UL, &frame->tramp[1]))
			return 1;
		flush_icache_range((unsigned long) &frame->tramp[0],
				   (unsigned long) &frame->tramp[2]);
	}

	return 0;
}
#endif
633
634
635
636
637
638static long restore_user_regs(struct pt_regs *regs,
639 struct mcontext __user *sr, int sig)
640{
641 long err;
642 unsigned int save_r2 = 0;
643 unsigned long msr;
644#ifdef CONFIG_VSX
645 int i;
646#endif
647
648
649
650
651
652 if (!sig)
653 save_r2 = (unsigned int)regs->gpr[2];
654 err = restore_general_regs(regs, sr);
655 regs->trap = 0;
656 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
657 if (!sig)
658 regs->gpr[2] = (unsigned long) save_r2;
659 if (err)
660 return 1;
661
662
663 if (sig)
664 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
665
666#ifdef CONFIG_ALTIVEC
667
668
669
670
671 regs->msr &= ~MSR_VEC;
672 if (msr & MSR_VEC) {
673
674 if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
675 sizeof(sr->mc_vregs)))
676 return 1;
677 current->thread.used_vr = true;
678 } else if (current->thread.used_vr)
679 memset(¤t->thread.vr_state, 0,
680 ELF_NVRREG * sizeof(vector128));
681
682
683 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
684 return 1;
685 if (cpu_has_feature(CPU_FTR_ALTIVEC))
686 mtspr(SPRN_VRSAVE, current->thread.vrsave);
687#endif
688 if (copy_fpr_from_user(current, &sr->mc_fregs))
689 return 1;
690
691#ifdef CONFIG_VSX
692
693
694
695
696 regs->msr &= ~MSR_VSX;
697 if (msr & MSR_VSX) {
698
699
700
701
702 if (copy_vsx_from_user(current, &sr->mc_vsregs))
703 return 1;
704 current->thread.used_vsr = true;
705 } else if (current->thread.used_vsr)
706 for (i = 0; i < 32 ; i++)
707 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
708#endif
709
710
711
712
713 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
714
715#ifdef CONFIG_SPE
716
717
718 regs->msr &= ~MSR_SPE;
719 if (msr & MSR_SPE) {
720
721 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
722 ELF_NEVRREG * sizeof(u32)))
723 return 1;
724 current->thread.used_spe = true;
725 } else if (current->thread.used_spe)
726 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
727
728
729 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
730 return 1;
731#endif
732
733 return 0;
734}
735
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except
 * for MSR, and recheckpoint the original checkpointed register state
 * for processes in transactions.
 *
 * Fix: "&current" had been mangled into the "¤" HTML entity
 * (mojibake) at several sites below; restored the original address-of
 * expressions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
				 struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	long err;
	unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
	int i;
#endif

	if (tm_suspend_disabled)
		return 1;

	/*
	 * restore general registers but not including MSR or SOFTE. Also
	 * take care of keeping r2 (TLS) intact if not a signal.
	 * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
	 * were set by the signal delivery.
	 */
	err = restore_general_regs(regs, tm_sr);
	err |= restore_general_regs(&current->thread.ckpt_regs, sr);

	err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

	err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
	if (err)
		return 1;

	/* Restore the previous little-endian mode */
	regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
	regs->msr &= ~MSR_VEC;
	if (msr & MSR_VEC) {
		/* restore altivec registers from the stack */
		if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
				     sizeof(sr->mc_vregs)) ||
		    __copy_from_user(&current->thread.vr_state,
				     &tm_sr->mc_vregs,
				     sizeof(sr->mc_vregs)))
			return 1;
		current->thread.used_vr = true;
	} else if (current->thread.used_vr) {
		memset(&current->thread.vr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
		memset(&current->thread.ckvr_state, 0,
		       ELF_NVRREG * sizeof(vector128));
	}

	/* Always get VRSAVE back */
	if (__get_user(current->thread.ckvrsave,
		       (u32 __user *)&sr->mc_vregs[32]) ||
	    __get_user(current->thread.vrsave,
		       (u32 __user *)&tm_sr->mc_vregs[32]))
		return 1;
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

	regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

	if (copy_fpr_from_user(current, &sr->mc_fregs) ||
	    copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
		return 1;

#ifdef CONFIG_VSX
	regs->msr &= ~MSR_VSX;
	if (msr & MSR_VSX) {
		/*
		 * Restore altivec registers from the stack to a local
		 * buffer, then write this out to the thread_struct
		 */
		if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
		    copy_ckvsx_from_user(current, &sr->mc_vsregs))
			return 1;
		current->thread.used_vsr = true;
	} else if (current->thread.used_vsr)
		for (i = 0; i < 32 ; i++) {
			current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
			current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
		}
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
	/* SPE regs are not checkpointed with TM, so this section is
	 * simply the same as in restore_user_regs().
	 */
	regs->msr &= ~MSR_SPE;
	if (msr & MSR_SPE) {
		if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
				     ELF_NEVRREG * sizeof(u32)))
			return 1;
		current->thread.used_spe = true;
	} else if (current->thread.used_spe)
		memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

	/* Always get SPEFSCR back */
	if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
		       + ELF_NEVRREG))
		return 1;
#endif /* CONFIG_SPE */

	/* Get the top half of the MSR from the user context */
	if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
		return 1;
	msr_hi <<= 32;
	/* If TM bits are set to the reserved value, it's an invalid context */
	if (MSR_TM_RESV(msr_hi))
		return 1;
	/* Pull in the MSR TM bits from the user context */
	regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
	/* Now, recheckpoint.  This loads up all of the checkpointed (older)
	 * registers, including FP and V[S]Rs.
	 */
	tm_enable();
	/* Make sure the transaction is marked as failed */
	current->thread.tm_texasr |= TEXASR_FS;
	/* This loads the checkpointed FP/VEC state, if used */
	tm_recheckpoint(&current->thread);

	/* This loads the speculative FP/VEC state, if used */
	msr_check_and_set(msr & (MSR_FP | MSR_VEC));
	if (msr & MSR_FP) {
		load_fp_state(&current->thread.fp_state);
		regs->msr |= (MSR_FP | current->thread.fpexc_mode);
	}
#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		load_vr_state(&current->thread.vr_state);
		regs->msr |= MSR_VEC;
	}
#endif

	return 0;
}
#endif
879
#ifdef CONFIG_PPC64

/* 32-bit compat tasks get the compat siginfo layout. */
#define copy_siginfo_to_user	copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */
885
886
887
888
889
/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
982
/*
 * Install the blocked signal mask and register state from @ucp.
 * @sig is non-zero for a signal-return style restore (the saved MSR_LE
 * bit is then honoured by restore_user_regs()), zero for swapcontext.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp), since mcp < 4GB */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
1011
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Install the blocked signal mask and both the checkpointed (@ucp) and
 * transactional (@tm_ucp) register state for a task returning into an
 * active transaction.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp), since mcp < 4GB */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
#endif
1040
1041#ifdef CONFIG_PPC64
1042COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1043 struct ucontext __user *, new_ctx, int, ctx_size)
1044#else
1045SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1046 struct ucontext __user *, new_ctx, long, ctx_size)
1047#endif
1048{
1049 struct pt_regs *regs = current_pt_regs();
1050 int ctx_has_vsx_region = 0;
1051
1052#ifdef CONFIG_PPC64
1053 unsigned long new_msr = 0;
1054
1055 if (new_ctx) {
1056 struct mcontext __user *mcp;
1057 u32 cmcp;
1058
1059
1060
1061
1062
1063
1064 if (__get_user(cmcp, &new_ctx->uc_regs))
1065 return -EFAULT;
1066 mcp = (struct mcontext __user *)(u64)cmcp;
1067 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1068 return -EFAULT;
1069 }
1070
1071
1072
1073
1074 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1075 return -EINVAL;
1076
1077
1078
1079
1080 if ((ctx_size < sizeof(struct ucontext)) &&
1081 (new_msr & MSR_VSX))
1082 return -EINVAL;
1083
1084 if (ctx_size >= sizeof(struct ucontext))
1085 ctx_has_vsx_region = 1;
1086#else
1087
1088
1089
1090 if (ctx_size < sizeof(struct ucontext))
1091 return -EINVAL;
1092#endif
1093 if (old_ctx != NULL) {
1094 struct mcontext __user *mctx;
1095
1096
1097
1098
1099
1100
1101
1102
1103 mctx = (struct mcontext __user *)
1104 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1105 if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
1106 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1107 || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
1108 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1109 return -EFAULT;
1110 }
1111 if (new_ctx == NULL)
1112 return 0;
1113 if (!access_ok(VERIFY_READ, new_ctx, ctx_size) ||
1114 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
1115 return -EFAULT;
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128 if (do_setcontext(new_ctx, regs, 0))
1129 do_exit(SIGSEGV);
1130
1131 set_thread_flag(TIF_RESTOREALL);
1132 return 0;
1133}
1134
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, this includes any transactional state created
	 * within it.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the transactional
		 * ucontext. */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* We only recheckpoint on return if we're
			 * transactional.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoids
		 * hitting a TM Bad thing at RFID
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1237
#ifdef CONFIG_PPC32
/*
 * Update debug-related MSR bits (and, with CONFIG_PPC_ADV_DEBUG_REGS,
 * dbcr0) according to the @dbg operation list, then restore the full
 * user context from @ctx.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
1341
1342
1343
1344
1345int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1346 struct task_struct *tsk)
1347{
1348 struct sigcontext __user *sc;
1349 struct sigframe __user *frame;
1350 struct mcontext __user *tm_mctx = NULL;
1351 unsigned long newsp = 0;
1352 int sigret;
1353 unsigned long tramp;
1354 struct pt_regs *regs = tsk->thread.regs;
1355
1356 BUG_ON(tsk != current);
1357
1358
1359 frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
1360 if (unlikely(frame == NULL))
1361 goto badframe;
1362 sc = (struct sigcontext __user *) &frame->sctx;
1363
1364#if _NSIG != 64
1365#error "Please adjust handle_signal()"
1366#endif
1367 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1368 || __put_user(oldset->sig[0], &sc->oldmask)
1369#ifdef CONFIG_PPC64
1370 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1371#else
1372 || __put_user(oldset->sig[1], &sc->_unused[3])
1373#endif
1374 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1375 || __put_user(ksig->sig, &sc->signal))
1376 goto badframe;
1377
1378 if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
1379 sigret = 0;
1380 tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
1381 } else {
1382 sigret = __NR_sigreturn;
1383 tramp = (unsigned long) frame->mctx.tramp;
1384 }
1385
1386#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1387 tm_mctx = &frame->mctx_transact;
1388 if (MSR_TM_ACTIVE(regs->msr)) {
1389 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1390 sigret))
1391 goto badframe;
1392 }
1393 else
1394#endif
1395 {
1396 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1397 goto badframe;
1398 }
1399
1400 regs->link = tramp;
1401
1402 tsk->thread.fp_state.fpscr = 0;
1403
1404
1405 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1406 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1407 goto badframe;
1408
1409 regs->gpr[1] = newsp;
1410 regs->gpr[3] = ksig->sig;
1411 regs->gpr[4] = (unsigned long) sc;
1412 regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
1413
1414 regs->msr &= ~MSR_LE;
1415 return 0;
1416
1417badframe:
1418 if (show_unhandled_signals)
1419 printk_ratelimited(KERN_INFO
1420 "%s[%d]: bad frame in handle_signal32: "
1421 "%p nip %08lx lr %08lx\n",
1422 tsk->comm, tsk->pid,
1423 frame, regs->nip, regs->link);
1424
1425 return 1;
1426}
1427
1428
1429
1430
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	/* The top half of the MSR says whether a transaction was active */
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1504