1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20#include <linux/sched.h>
21#include <linux/mm.h>
22#include <linux/smp.h>
23#include <linux/kernel.h>
24#include <linux/signal.h>
25#include <linux/errno.h>
26#include <linux/elf.h>
27#include <linux/ptrace.h>
28#include <linux/pagemap.h>
29#include <linux/ratelimit.h>
30#include <linux/syscalls.h>
31#ifdef CONFIG_PPC64
32#include <linux/compat.h>
33#else
34#include <linux/wait.h>
35#include <linux/unistd.h>
36#include <linux/stddef.h>
37#include <linux/tty.h>
38#include <linux/binfmts.h>
39#endif
40
41#include <linux/uaccess.h>
42#include <asm/cacheflush.h>
43#include <asm/syscalls.h>
44#include <asm/sigcontext.h>
45#include <asm/vdso.h>
46#include <asm/switch_to.h>
47#include <asm/tm.h>
48#include <asm/asm-prototypes.h>
49#ifdef CONFIG_PPC64
50#include "ppc32.h"
51#include <asm/unistd.h>
52#else
53#include <asm/ucontext.h>
54#include <asm/pgtable.h>
55#endif
56
57#include "signal.h"
58
59
60#ifdef CONFIG_PPC64
61#define old_sigaction old_sigaction32
62#define sigcontext sigcontext32
63#define mcontext mcontext32
64#define ucontext ucontext32
65
66#define __save_altstack __compat_save_altstack
67
68
69
70
71
72#define UCONTEXTSIZEWITHOUTVSX \
73 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
74
75
76
77
78
79
80
81
82#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
83#undef __SIGNAL_FRAMESIZE
84#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
85#undef ELF_NVRREG
86#define ELF_NVRREG ELF_NVRREG32
87
88
89
90
91
/* Write a kernel sigset_t out to userspace in the 32-bit compat layout. */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}
96
/* Read a 32-bit compat sigset from userspace into a kernel sigset_t. */
static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}
102
103#define to_user_ptr(p) ptr_to_compat(p)
104#define from_user_ptr(p) compat_ptr(p)
105
/*
 * Copy the 64-bit GPRs/special regs into the 32-bit mcontext, truncating
 * each value to 32 bits.  PT_SOFTE is always written as 1 so userspace
 * never sees the kernel's soft-interrupt state.  Returns -EFAULT on a
 * failed user write, 0 on success.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	/* Force usr to alway see softe as 1 (interrupts enabled) */
	elf_greg_t64 softe = 0x1;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		/* NOTE(review): skips indices 14..31 when regs are not full —
		 * presumably the unsaved non-volatile GPRs; confirm vs FULL_REGS */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if ( i == PT_SOFTE) {
			if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
				return -EFAULT;
			else
				continue;
		}
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
130
/*
 * Restore GPRs/special regs from the 32-bit mcontext, widening each
 * element into the 64-bit pt_regs.  PT_MSR and PT_SOFTE are deliberately
 * skipped so userspace cannot set kernel-controlled MSR / irq-state bits.
 */
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
145
146#else
147
148#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
149
/* Native 32-bit: plain bulk copy of the sigset to userspace.
 * Returns the number of bytes NOT copied (nonzero on fault). */
static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}
154
/* Native 32-bit: plain bulk copy of the sigset from userspace.
 * Returns the number of bytes NOT copied (nonzero on fault). */
static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}
159
160#define to_user_ptr(p) ((unsigned long)(p))
161#define from_user_ptr(p) ((void __user *)(p))
162
/* Native 32-bit: dump pt_regs directly into the frame, clamped to the
 * smaller of elf_gregset_t and struct pt_regs (GP_REGS_SIZE). */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}
169
170static inline int restore_general_regs(struct pt_regs *regs,
171 struct mcontext __user *sr)
172{
173
174 if (__copy_from_user(regs, &sr->mc_gregs,
175 PT_MSR * sizeof(elf_greg_t)))
176 return -EFAULT;
177
178 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
179 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
180 return -EFAULT;
181 return 0;
182}
183#endif
184
185
186
187
188
189
190
191
192
193
194
195
196
/*
 * Layout of the frame pushed on the user stack for a non-RT signal.
 * The mcontext carries all saved register state; with TM configured a
 * second sigcontext/mcontext pair carries the transactional state.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int abigap[56];
};
210
211
212#define tramp mc_pad
213
214
215
216
217
218
219
220
221
222
223
224
/*
 * Layout of the frame pushed on the user stack for an RT signal:
 * siginfo (compat layout on PPC64) followed by the ucontext, plus a
 * second ucontext for the transactional state when TM is configured.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int abigap[56];
};
241
242#ifdef CONFIG_VSX
/*
 * Dump ELF_NFPREG doubles to userspace: the FP registers followed by
 * FPSCR in the last slot.  Goes through a local buffer because, with
 * VSX built in, the FPRs are accessed via the TS_FPR() view of fp_state.
 * Returns the number of bytes NOT copied (nonzero on fault).
 */
unsigned long copy_fpr_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save FPR copy to local buffer then write to the frame */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_FPR(i);
	buf[i] = task->thread.fp_state.fpscr;	/* last slot is FPSCR */
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
255
/*
 * Read ELF_NFPREG doubles from userspace: FPRs followed by FPSCR.
 * Returns 1 on fault, 0 on success.
 */
unsigned long copy_fpr_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_FPR(i) = buf[i];
	task->thread.fp_state.fpscr = buf[i];	/* last slot is FPSCR */

	return 0;
}
270
/*
 * Dump the TS_VSRLOWOFFSET halves of VSR0-31 to userspace via a local
 * buffer.  Returns the number of bytes NOT copied (nonzero on fault).
 */
unsigned long copy_vsx_to_user(void __user *to,
			       struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save VSR halves to local buffer then write to the frame */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
282
/*
 * Read the TS_VSRLOWOFFSET halves of VSR0-31 from userspace.
 * Returns 1 on fault, 0 on success.
 */
unsigned long copy_vsx_from_user(struct task_struct *task,
				 void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
295
296#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Same as copy_fpr_to_user() but for the checkpointed (ck*) FP state
 * used by transactional memory.  Returns bytes NOT copied.
 */
unsigned long copy_ckfpr_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NFPREG];
	int i;

	/* save checkpointed FPR copy to local buffer then write to the frame */
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		buf[i] = task->thread.TS_CKFPR(i);
	buf[i] = task->thread.ckfp_state.fpscr;	/* last slot is FPSCR */
	return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}
309
/*
 * Same as copy_fpr_from_user() but for the checkpointed (ck*) FP state.
 * Returns 1 on fault, 0 on success.
 */
unsigned long copy_ckfpr_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NFPREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
		return 1;
	for (i = 0; i < (ELF_NFPREG - 1) ; i++)
		task->thread.TS_CKFPR(i) = buf[i];
	task->thread.ckfp_state.fpscr = buf[i];	/* last slot is FPSCR */

	return 0;
}
324
/*
 * Same as copy_vsx_to_user() but for the checkpointed (ck*) VSX halves.
 * Returns bytes NOT copied.
 */
unsigned long copy_ckvsx_to_user(void __user *to,
				 struct task_struct *task)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	/* save checkpointed VSR halves to local buffer then write to the frame */
	for (i = 0; i < ELF_NVSRHALFREG; i++)
		buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
	return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}
336
/*
 * Same as copy_vsx_from_user() but for the checkpointed (ck*) VSX halves.
 * Returns 1 on fault, 0 on success.
 */
unsigned long copy_ckvsx_from_user(struct task_struct *task,
				   void __user *from)
{
	u64 buf[ELF_NVSRHALFREG];
	int i;

	if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
		return 1;
	for (i = 0; i < ELF_NVSRHALFREG ; i++)
		task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
	return 0;
}
349#endif
350#else
/* Non-VSX build: fp_state.fpr already matches the user layout, so one
 * bulk copy suffices.  Returns bytes NOT copied. */
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
357
/* Non-VSX build: bulk-read the FP state straight from the user frame.
 * Returns bytes NOT copied. */
inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
364
365#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Non-VSX build: bulk-copy the checkpointed FP state to the user frame.
 * Returns bytes NOT copied. */
inline unsigned long copy_ckfpr_to_user(void __user *to,
					struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}
372
/* Non-VSX build: bulk-read the checkpointed FP state from the user frame.
 * Returns bytes NOT copied. */
inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
					  void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
379#endif
380#endif
381
382
383
384
385
386
387static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
388 struct mcontext __user *tm_frame, int sigret,
389 int ctx_has_vsx_region)
390{
391 unsigned long msr = regs->msr;
392
393
394 flush_fp_to_thread(current);
395
396
397 if (save_general_regs(regs, frame))
398 return 1;
399
400#ifdef CONFIG_ALTIVEC
401
402 if (current->thread.used_vr) {
403 flush_altivec_to_thread(current);
404 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
405 ELF_NVRREG * sizeof(vector128)))
406 return 1;
407
408
409 msr |= MSR_VEC;
410 }
411
412
413
414
415
416
417
418
419 if (cpu_has_feature(CPU_FTR_ALTIVEC))
420 current->thread.vrsave = mfspr(SPRN_VRSAVE);
421 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
422 return 1;
423#endif
424 if (copy_fpr_to_user(&frame->mc_fregs, current))
425 return 1;
426
427
428
429
430
431 msr &= ~MSR_VSX;
432#ifdef CONFIG_VSX
433
434
435
436
437
438
439 if (current->thread.used_vsr && ctx_has_vsx_region) {
440 flush_vsx_to_thread(current);
441 if (copy_vsx_to_user(&frame->mc_vsregs, current))
442 return 1;
443 msr |= MSR_VSX;
444 }
445#endif
446#ifdef CONFIG_SPE
447
448 if (current->thread.used_spe) {
449 flush_spe_to_thread(current);
450 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
451 ELF_NEVRREG * sizeof(u32)))
452 return 1;
453
454
455 msr |= MSR_SPE;
456 }
457
458
459
460 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
461 return 1;
462#endif
463
464 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
465 return 1;
466
467
468
469 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
470 return 1;
471
472 if (sigret) {
473
474 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
475 || __put_user(0x44000002UL, &frame->tramp[1]))
476 return 1;
477 flush_icache_range((unsigned long) &frame->tramp[0],
478 (unsigned long) &frame->tramp[2]);
479 }
480
481 return 0;
482}
483
484#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
485
486
487
488
489
490
491
492
493
494static int save_tm_user_regs(struct pt_regs *regs,
495 struct mcontext __user *frame,
496 struct mcontext __user *tm_frame, int sigret)
497{
498 unsigned long msr = regs->msr;
499
500 WARN_ON(tm_suspend_disabled);
501
502
503
504
505
506
507 regs->msr &= ~MSR_TS_MASK;
508
509
510 if (save_general_regs(¤t->thread.ckpt_regs, frame)
511 || save_general_regs(regs, tm_frame))
512 return 1;
513
514
515
516
517
518
519
520 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
521 return 1;
522
523#ifdef CONFIG_ALTIVEC
524
525 if (current->thread.used_vr) {
526 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
527 ELF_NVRREG * sizeof(vector128)))
528 return 1;
529 if (msr & MSR_VEC) {
530 if (__copy_to_user(&tm_frame->mc_vregs,
531 ¤t->thread.vr_state,
532 ELF_NVRREG * sizeof(vector128)))
533 return 1;
534 } else {
535 if (__copy_to_user(&tm_frame->mc_vregs,
536 ¤t->thread.ckvr_state,
537 ELF_NVRREG * sizeof(vector128)))
538 return 1;
539 }
540
541
542
543
544 msr |= MSR_VEC;
545 }
546
547
548
549
550
551
552 if (cpu_has_feature(CPU_FTR_ALTIVEC))
553 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
554 if (__put_user(current->thread.ckvrsave,
555 (u32 __user *)&frame->mc_vregs[32]))
556 return 1;
557 if (msr & MSR_VEC) {
558 if (__put_user(current->thread.vrsave,
559 (u32 __user *)&tm_frame->mc_vregs[32]))
560 return 1;
561 } else {
562 if (__put_user(current->thread.ckvrsave,
563 (u32 __user *)&tm_frame->mc_vregs[32]))
564 return 1;
565 }
566#endif
567
568 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
569 return 1;
570 if (msr & MSR_FP) {
571 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
572 return 1;
573 } else {
574 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
575 return 1;
576 }
577
578#ifdef CONFIG_VSX
579
580
581
582
583
584
585 if (current->thread.used_vsr) {
586 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
587 return 1;
588 if (msr & MSR_VSX) {
589 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
590 current))
591 return 1;
592 } else {
593 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
594 return 1;
595 }
596
597 msr |= MSR_VSX;
598 }
599#endif
600#ifdef CONFIG_SPE
601
602
603
604 if (current->thread.used_spe) {
605 flush_spe_to_thread(current);
606 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
607 ELF_NEVRREG * sizeof(u32)))
608 return 1;
609
610
611 msr |= MSR_SPE;
612 }
613
614
615 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
616 return 1;
617#endif
618
619 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
620 return 1;
621 if (sigret) {
622
623 if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
624 || __put_user(0x44000002UL, &frame->tramp[1]))
625 return 1;
626 flush_icache_range((unsigned long) &frame->tramp[0],
627 (unsigned long) &frame->tramp[2]);
628 }
629
630 return 0;
631}
632#endif
633
634
635
636
637
638static long restore_user_regs(struct pt_regs *regs,
639 struct mcontext __user *sr, int sig)
640{
641 long err;
642 unsigned int save_r2 = 0;
643 unsigned long msr;
644#ifdef CONFIG_VSX
645 int i;
646#endif
647
648
649
650
651
652 if (!sig)
653 save_r2 = (unsigned int)regs->gpr[2];
654 err = restore_general_regs(regs, sr);
655 regs->trap = 0;
656 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
657 if (!sig)
658 regs->gpr[2] = (unsigned long) save_r2;
659 if (err)
660 return 1;
661
662
663 if (sig)
664 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
665
666#ifdef CONFIG_ALTIVEC
667
668
669
670
671 regs->msr &= ~MSR_VEC;
672 if (msr & MSR_VEC) {
673
674 if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
675 sizeof(sr->mc_vregs)))
676 return 1;
677 current->thread.used_vr = true;
678 } else if (current->thread.used_vr)
679 memset(¤t->thread.vr_state, 0,
680 ELF_NVRREG * sizeof(vector128));
681
682
683 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
684 return 1;
685 if (cpu_has_feature(CPU_FTR_ALTIVEC))
686 mtspr(SPRN_VRSAVE, current->thread.vrsave);
687#endif
688 if (copy_fpr_from_user(current, &sr->mc_fregs))
689 return 1;
690
691#ifdef CONFIG_VSX
692
693
694
695
696 regs->msr &= ~MSR_VSX;
697 if (msr & MSR_VSX) {
698
699
700
701
702 if (copy_vsx_from_user(current, &sr->mc_vsregs))
703 return 1;
704 current->thread.used_vsr = true;
705 } else if (current->thread.used_vsr)
706 for (i = 0; i < 32 ; i++)
707 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
708#endif
709
710
711
712
713 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
714
715#ifdef CONFIG_SPE
716
717
718 regs->msr &= ~MSR_SPE;
719 if (msr & MSR_SPE) {
720
721 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
722 ELF_NEVRREG * sizeof(u32)))
723 return 1;
724 current->thread.used_spe = true;
725 } else if (current->thread.used_spe)
726 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
727
728
729 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
730 return 1;
731#endif
732
733 return 0;
734}
735
736#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
737
738
739
740
741
742static long restore_tm_user_regs(struct pt_regs *regs,
743 struct mcontext __user *sr,
744 struct mcontext __user *tm_sr)
745{
746 long err;
747 unsigned long msr, msr_hi;
748#ifdef CONFIG_VSX
749 int i;
750#endif
751
752 if (tm_suspend_disabled)
753 return 1;
754
755
756
757
758
759
760
761 err = restore_general_regs(regs, tm_sr);
762 err |= restore_general_regs(¤t->thread.ckpt_regs, sr);
763
764 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
765
766 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
767 if (err)
768 return 1;
769
770
771 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
772
773#ifdef CONFIG_ALTIVEC
774 regs->msr &= ~MSR_VEC;
775 if (msr & MSR_VEC) {
776
777 if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
778 sizeof(sr->mc_vregs)) ||
779 __copy_from_user(¤t->thread.vr_state,
780 &tm_sr->mc_vregs,
781 sizeof(sr->mc_vregs)))
782 return 1;
783 current->thread.used_vr = true;
784 } else if (current->thread.used_vr) {
785 memset(¤t->thread.vr_state, 0,
786 ELF_NVRREG * sizeof(vector128));
787 memset(¤t->thread.ckvr_state, 0,
788 ELF_NVRREG * sizeof(vector128));
789 }
790
791
792 if (__get_user(current->thread.ckvrsave,
793 (u32 __user *)&sr->mc_vregs[32]) ||
794 __get_user(current->thread.vrsave,
795 (u32 __user *)&tm_sr->mc_vregs[32]))
796 return 1;
797 if (cpu_has_feature(CPU_FTR_ALTIVEC))
798 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
799#endif
800
801 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
802
803 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
804 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
805 return 1;
806
807#ifdef CONFIG_VSX
808 regs->msr &= ~MSR_VSX;
809 if (msr & MSR_VSX) {
810
811
812
813
814 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
815 copy_ckvsx_from_user(current, &sr->mc_vsregs))
816 return 1;
817 current->thread.used_vsr = true;
818 } else if (current->thread.used_vsr)
819 for (i = 0; i < 32 ; i++) {
820 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
821 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
822 }
823#endif
824
825#ifdef CONFIG_SPE
826
827
828
829 regs->msr &= ~MSR_SPE;
830 if (msr & MSR_SPE) {
831 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
832 ELF_NEVRREG * sizeof(u32)))
833 return 1;
834 current->thread.used_spe = true;
835 } else if (current->thread.used_spe)
836 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
837
838
839 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
840 + ELF_NEVRREG))
841 return 1;
842#endif
843
844
845 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
846 return 1;
847 msr_hi <<= 32;
848
849 if (MSR_TM_RESV(msr_hi))
850 return 1;
851
852 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
853
854
855
856
857 tm_enable();
858
859 current->thread.tm_texasr |= TEXASR_FS;
860
861 tm_recheckpoint(¤t->thread);
862
863
864 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
865 if (msr & MSR_FP) {
866 load_fp_state(¤t->thread.fp_state);
867 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
868 }
869#ifdef CONFIG_ALTIVEC
870 if (msr & MSR_VEC) {
871 load_vr_state(¤t->thread.vr_state);
872 regs->msr |= MSR_VEC;
873 }
874#endif
875
876 return 0;
877}
878#endif
879
880#ifdef CONFIG_PPC64
881
882#define copy_siginfo_to_user copy_siginfo_to_user32
883
884#endif
885
886
887
888
889
/*
 * Set up the stack frame and registers for delivery of a "real-time"
 * signal (one which gets siginfo).  Returns 0 on success, 1 on failure
 * (caller treats that as a bad frame).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;		/* for the bad-frame diagnostic */
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Put a Real Time Context onto the user stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack.  Use the VDSO trampoline when
	 * available, otherwise the one written into the frame. */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* chain the transactional ucontext off uc.uc_link */
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for the signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
982
/*
 * Restore the signal mask and register state from the given ucontext.
 * @sig is nonzero for a real signal return (lets restore_user_regs()
 * restore MSR_LE instead of preserving r2).  Returns -EFAULT on fault.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		/* uc_regs holds a 32-bit pointer to the real mcontext */
		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no explicit access_ok() here; the __get_user/__copy
		 * accesses in restore_user_regs() still fault safely */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
1011
1012#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM variant of do_setcontext(): restores the signal mask, then both the
 * checkpointed context (@ucp) and the transactional context (@tm_ucp).
 * Returns -EFAULT on fault.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	/* uc_regs holds a 32-bit pointer to the real mcontext */
	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
1039#endif
1040
1041#ifdef CONFIG_PPC64
1042COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1043 struct ucontext __user *, new_ctx, int, ctx_size)
1044#else
1045SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1046 struct ucontext __user *, new_ctx, long, ctx_size)
1047#endif
1048{
1049 struct pt_regs *regs = current_pt_regs();
1050 int ctx_has_vsx_region = 0;
1051
1052#ifdef CONFIG_PPC64
1053 unsigned long new_msr = 0;
1054
1055 if (new_ctx) {
1056 struct mcontext __user *mcp;
1057 u32 cmcp;
1058
1059
1060
1061
1062
1063
1064 if (__get_user(cmcp, &new_ctx->uc_regs))
1065 return -EFAULT;
1066 mcp = (struct mcontext __user *)(u64)cmcp;
1067 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1068 return -EFAULT;
1069 }
1070
1071
1072
1073
1074 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1075 return -EINVAL;
1076
1077
1078
1079
1080 if ((ctx_size < sizeof(struct ucontext)) &&
1081 (new_msr & MSR_VSX))
1082 return -EINVAL;
1083
1084 if (ctx_size >= sizeof(struct ucontext))
1085 ctx_has_vsx_region = 1;
1086#else
1087
1088
1089
1090 if (ctx_size < sizeof(struct ucontext))
1091 return -EINVAL;
1092#endif
1093 if (old_ctx != NULL) {
1094 struct mcontext __user *mctx;
1095
1096
1097
1098
1099
1100
1101
1102
1103 mctx = (struct mcontext __user *)
1104 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1105 if (!access_ok(old_ctx, ctx_size)
1106 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1107 || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
1108 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1109 return -EFAULT;
1110 }
1111 if (new_ctx == NULL)
1112 return 0;
1113 if (!access_ok(new_ctx, ctx_size) ||
1114 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
1115 return -EFAULT;
1116
1117
1118
1119
1120
1121
1122
1123
1124
1125
1126
1127
1128 if (do_setcontext(new_ctx, regs, 0))
1129 do_exit(SIGSEGV);
1130
1131 set_thread_flag(TIF_RESTOREALL);
1132 return 0;
1133}
1134
/*
 * sys_rt_sigreturn: tear down the rt signal frame set up by
 * handle_rt_signal32() and restore the interrupted context.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* the frame lives where handle_rt_signal32() put it */
	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a suspended transaction, reclaim (discard) it: the
	 * purpose of sigreturn is to destroy all traces of the signal
	 * frame, including any transactional state created within it.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	/* a non-NULL uc_link points at the transactional ucontext */
	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* the top 32 bits of the MSR are stashed in the
		 * transactional mcontext's MSR word */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on a non-TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* only recheckpoint when actually transactional */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Clear the TS bits: the ucontext MSR TS is not set and
		 * no recheckpoint happened, so leaving them set would
		 * trigger a TM Bad Thing on return to userspace.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/* restore the sigaltstack setting saved at delivery time */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1240
#ifdef CONFIG_PPC32
/*
 * sys_debug_setcontext: like setcontext but also applies an array of
 * debug operations (single-step / branch-trace) before installing the
 * new context.  Debug state is applied first so the MSR written here is
 * not clobbered by the context restore.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
			 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	/* validate and accumulate the requested debug settings */
	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* only drop MSR_DE/IDM when no other debug
				 * events remain active */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/*
	 * We wait until here to actually install the values in the
	 * registers so if we fail in the above loop, it will not
	 * affect the contents of these registers.
	 */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's image of
	 * the user's registers, the user's registers may already be
	 * partially updated; since the region was checked and pre-faulted
	 * above, a fault here means something else went badly wrong, so
	 * kill the task with SIGSEGV.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV, current);
		goto out;
	}

	/* restore the sigaltstack setting from the context */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
#endif
1344
1345
1346
1347
1348int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
1349 struct task_struct *tsk)
1350{
1351 struct sigcontext __user *sc;
1352 struct sigframe __user *frame;
1353 struct mcontext __user *tm_mctx = NULL;
1354 unsigned long newsp = 0;
1355 int sigret;
1356 unsigned long tramp;
1357 struct pt_regs *regs = tsk->thread.regs;
1358
1359 BUG_ON(tsk != current);
1360
1361
1362 frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
1363 if (unlikely(frame == NULL))
1364 goto badframe;
1365 sc = (struct sigcontext __user *) &frame->sctx;
1366
1367#if _NSIG != 64
1368#error "Please adjust handle_signal()"
1369#endif
1370 if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
1371 || __put_user(oldset->sig[0], &sc->oldmask)
1372#ifdef CONFIG_PPC64
1373 || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
1374#else
1375 || __put_user(oldset->sig[1], &sc->_unused[3])
1376#endif
1377 || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
1378 || __put_user(ksig->sig, &sc->signal))
1379 goto badframe;
1380
1381 if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
1382 sigret = 0;
1383 tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
1384 } else {
1385 sigret = __NR_sigreturn;
1386 tramp = (unsigned long) frame->mctx.tramp;
1387 }
1388
1389#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1390 tm_mctx = &frame->mctx_transact;
1391 if (MSR_TM_ACTIVE(regs->msr)) {
1392 if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
1393 sigret))
1394 goto badframe;
1395 }
1396 else
1397#endif
1398 {
1399 if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
1400 goto badframe;
1401 }
1402
1403 regs->link = tramp;
1404
1405 tsk->thread.fp_state.fpscr = 0;
1406
1407
1408 newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
1409 if (put_user(regs->gpr[1], (u32 __user *)newsp))
1410 goto badframe;
1411
1412 regs->gpr[1] = newsp;
1413 regs->gpr[3] = ksig->sig;
1414 regs->gpr[4] = (unsigned long) sc;
1415 regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
1416
1417 regs->msr &= ~MSR_LE;
1418 return 0;
1419
1420badframe:
1421 if (show_unhandled_signals)
1422 printk_ratelimited(KERN_INFO
1423 "%s[%d]: bad frame in handle_signal32: "
1424 "%p nip %08lx lr %08lx\n",
1425 tsk->comm, tsk->pid,
1426 frame, regs->nip, regs->link);
1427
1428 return 1;
1429}
1430
1431
1432
1433
/*
 * sys_sigreturn: tear down the classic signal frame set up by
 * handle_signal32() and restore the interrupted context.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;		/* for the bad-frame diagnostic */
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	/* the frame lives where handle_signal32() put it */
	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Reassemble the 64-bit signal mask from the two 32-bit halves
	 * stored in oldmask and _unused[3] at delivery time.
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* the transactional mcontext's MSR word holds the MSR top half;
	 * its TS bits tell us whether this was a transactional frame */
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV, current);
	return 0;
}
1507