1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/kernel.h>
20#include <linux/signal.h>
21#include <linux/errno.h>
22#include <linux/elf.h>
23#include <linux/ptrace.h>
24#include <linux/pagemap.h>
25#include <linux/ratelimit.h>
26#include <linux/syscalls.h>
27#ifdef CONFIG_PPC64
28#include <linux/compat.h>
29#else
30#include <linux/wait.h>
31#include <linux/unistd.h>
32#include <linux/stddef.h>
33#include <linux/tty.h>
34#include <linux/binfmts.h>
35#endif
36
37#include <linux/uaccess.h>
38#include <asm/cacheflush.h>
39#include <asm/syscalls.h>
40#include <asm/sigcontext.h>
41#include <asm/vdso.h>
42#include <asm/switch_to.h>
43#include <asm/tm.h>
44#include <asm/asm-prototypes.h>
45#ifdef CONFIG_PPC64
46#include "ppc32.h"
47#include <asm/unistd.h>
48#else
49#include <asm/ucontext.h>
50#include <asm/pgtable.h>
51#endif
52
53#include "signal.h"
54
55
56#ifdef CONFIG_PPC64
/*
 * On 64-bit kernels this file builds the 32-bit *compat* signal frames,
 * so alias the frame structure names to their 32-bit variants.
 */
#define old_sigaction	old_sigaction32
#define sigcontext	sigcontext32
#define mcontext	mcontext32
#define ucontext	ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Size of a 32-bit ucontext without the trailing VSX half-register
 * block; used below to accept context buffers from pre-VSX binaries.
 */
#define UCONTEXTSIZEWITHOUTVSX \
		(sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/* Only copy the smaller of the 32-bit gregset and pt_regs layouts. */
#define GP_REGS_SIZE	min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
/* Use the 32-bit frame size / vector-register count on this path. */
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE	__SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG	ELF_NVRREG32
83
84
85
86
87
/* Copy the kernel sigset_t out to user space in 32-bit compat layout. */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}

/* Read a 32-bit compat sigset from user space into a kernel sigset_t. */
static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}

/* User pointers travel as 32-bit values inside the compat frames. */
#define to_user_ptr(p) ptr_to_compat(p)
#define from_user_ptr(p) compat_ptr(p)
101
/*
 * Write the general-purpose register set into the 32-bit frame,
 * narrowing each 64-bit register value to 32 bits.  The PT_SOFTE slot
 * is always reported as 1 rather than exposing the kernel's internal
 * soft-mask state to user space.  Returns 0 on success, -EFAULT on a
 * failed user-space store.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	/* Force usr/kern transition value of 1 for interrupts soft-enabled */
	elf_greg_t64 softe = 0x1;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		/* Defensive: if regs are somehow not full despite the
		 * WARN_ON above, skip the non-volatile GPR slots 14..31. */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if ( i == PT_SOFTE) {
			if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
				return -EFAULT;
			else
				continue;
		}
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
126
127static inline int restore_general_regs(struct pt_regs *regs,
128 struct mcontext __user *sr)
129{
130 elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
131 int i;
132
133 for (i = 0; i <= PT_RESULT; i++) {
134 if ((i == PT_MSR) || (i == PT_SOFTE))
135 continue;
136 if (__get_user(gregs[i], &sr->mc_gregs[i]))
137 return -EFAULT;
138 }
139 return 0;
140}
141
142#else
143
/* Native 32-bit kernel: copy the smaller of gregset and pt_regs. */
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

/* Native sigset_t: plain copies to/from user space (return nonzero on
 * fault, as copy_{to,from}_user do). */
static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}

/* User pointers are full-width on a native 32-bit kernel. */
#define to_user_ptr(p) ((unsigned long)(p))
#define from_user_ptr(p) ((void __user *)(p))

/* Dump the whole pt_regs area straight into the frame's gregs. */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}
165
166static inline int restore_general_regs(struct pt_regs *regs,
167 struct mcontext __user *sr)
168{
169
170 if (__copy_from_user(regs, &sr->mc_gregs,
171 PT_MSR * sizeof(elf_greg_t)))
172 return -EFAULT;
173
174 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
175 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
176 return -EFAULT;
177 return 0;
178}
179#endif
180
181
182
183
184
185
186
187
188
189
190
191
192
/*
 * Layout of the "old-style" signal frame placed on the user stack.
 * When transactional memory is configured, a second
 * sigcontext/mcontext pair holds the transactional register state.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext mctx_transact;
#endif
	/*
	 * Scratch space below the frame that the calling ABI allows
	 * callees to use (NOTE(review): presumably the register save
	 * area of the 32-bit ABI — confirm against __SIGNAL_FRAMESIZE).
	 */
	int abigap[56];
};
206
207
/* The signal-return trampoline instructions are stored in mc_pad. */
#define tramp mc_pad
209
210
211
212
213
214
215
216
217
218
219
220
/*
 * Layout of the RT signal frame on the user stack: siginfo (compat
 * layout on a 64-bit kernel) followed by the ucontext(s); with TM a
 * second ucontext carries the transactional state.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	/*
	 * Scratch space below the frame that the calling ABI allows
	 * callees to use (NOTE(review): presumably the register save
	 * area of the 32-bit ABI — confirm against __SIGNAL_FRAMESIZE).
	 */
	int abigap[56];
};
237
238#ifdef CONFIG_VSX
239unsigned long copy_fpr_to_user(void __user *to,
240 struct task_struct *task)
241{
242 u64 buf[ELF_NFPREG];
243 int i;
244
245
246 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
247 buf[i] = task->thread.TS_FPR(i);
248 buf[i] = task->thread.fp_state.fpscr;
249 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
250}
251
252unsigned long copy_fpr_from_user(struct task_struct *task,
253 void __user *from)
254{
255 u64 buf[ELF_NFPREG];
256 int i;
257
258 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
259 return 1;
260 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
261 task->thread.TS_FPR(i) = buf[i];
262 task->thread.fp_state.fpscr = buf[i];
263
264 return 0;
265}
266
267unsigned long copy_vsx_to_user(void __user *to,
268 struct task_struct *task)
269{
270 u64 buf[ELF_NVSRHALFREG];
271 int i;
272
273
274 for (i = 0; i < ELF_NVSRHALFREG; i++)
275 buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
276 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
277}
278
279unsigned long copy_vsx_from_user(struct task_struct *task,
280 void __user *from)
281{
282 u64 buf[ELF_NVSRHALFREG];
283 int i;
284
285 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
286 return 1;
287 for (i = 0; i < ELF_NVSRHALFREG ; i++)
288 task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
289 return 0;
290}
291
292#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
293unsigned long copy_ckfpr_to_user(void __user *to,
294 struct task_struct *task)
295{
296 u64 buf[ELF_NFPREG];
297 int i;
298
299
300 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
301 buf[i] = task->thread.TS_CKFPR(i);
302 buf[i] = task->thread.ckfp_state.fpscr;
303 return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
304}
305
306unsigned long copy_ckfpr_from_user(struct task_struct *task,
307 void __user *from)
308{
309 u64 buf[ELF_NFPREG];
310 int i;
311
312 if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
313 return 1;
314 for (i = 0; i < (ELF_NFPREG - 1) ; i++)
315 task->thread.TS_CKFPR(i) = buf[i];
316 task->thread.ckfp_state.fpscr = buf[i];
317
318 return 0;
319}
320
321unsigned long copy_ckvsx_to_user(void __user *to,
322 struct task_struct *task)
323{
324 u64 buf[ELF_NVSRHALFREG];
325 int i;
326
327
328 for (i = 0; i < ELF_NVSRHALFREG; i++)
329 buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
330 return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
331}
332
333unsigned long copy_ckvsx_from_user(struct task_struct *task,
334 void __user *from)
335{
336 u64 buf[ELF_NVSRHALFREG];
337 int i;
338
339 if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
340 return 1;
341 for (i = 0; i < ELF_NVSRHALFREG ; i++)
342 task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
343 return 0;
344}
345#endif
346#else
/*
 * Non-VSX configurations: the FP state can be copied as one contiguous
 * block of ELF_NFPREG doubles.  NOTE(review): this relies on fpscr
 * following the fpr array in fp_state's storage layout — confirm
 * against the thread_struct definition.
 */
inline unsigned long copy_fpr_to_user(void __user *to,
				      struct task_struct *task)
{
	return __copy_to_user(to, task->thread.fp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
					void __user *from)
{
	return __copy_from_user(task->thread.fp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/* Same contiguous copies, but for the TM checkpointed FP state. */
inline unsigned long copy_ckfpr_to_user(void __user *to,
					struct task_struct *task)
{
	return __copy_to_user(to, task->thread.ckfp_state.fpr,
			      ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
					  void __user *from)
{
	return __copy_from_user(task->thread.ckfp_state.fpr, from,
				ELF_NFPREG * sizeof(double));
}
#endif
375#endif
376#endif
377
378
379
380
381
382
383static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
384 struct mcontext __user *tm_frame, int sigret,
385 int ctx_has_vsx_region)
386{
387 unsigned long msr = regs->msr;
388
389
390 flush_fp_to_thread(current);
391
392
393 if (save_general_regs(regs, frame))
394 return 1;
395
396#ifdef CONFIG_ALTIVEC
397
398 if (current->thread.used_vr) {
399 flush_altivec_to_thread(current);
400 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
401 ELF_NVRREG * sizeof(vector128)))
402 return 1;
403
404
405 msr |= MSR_VEC;
406 }
407
408
409
410
411
412
413
414
415 if (cpu_has_feature(CPU_FTR_ALTIVEC))
416 current->thread.vrsave = mfspr(SPRN_VRSAVE);
417 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
418 return 1;
419#endif
420 if (copy_fpr_to_user(&frame->mc_fregs, current))
421 return 1;
422
423
424
425
426
427 msr &= ~MSR_VSX;
428#ifdef CONFIG_VSX
429
430
431
432
433
434
435 if (current->thread.used_vsr && ctx_has_vsx_region) {
436 flush_vsx_to_thread(current);
437 if (copy_vsx_to_user(&frame->mc_vsregs, current))
438 return 1;
439 msr |= MSR_VSX;
440 }
441#endif
442#ifdef CONFIG_SPE
443
444 if (current->thread.used_spe) {
445 flush_spe_to_thread(current);
446 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
447 ELF_NEVRREG * sizeof(u32)))
448 return 1;
449
450
451 msr |= MSR_SPE;
452 }
453
454
455
456 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
457 return 1;
458#endif
459
460 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
461 return 1;
462
463
464
465 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
466 return 1;
467
468 if (sigret) {
469
470 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
471 || __put_user(PPC_INST_SC, &frame->tramp[1]))
472 return 1;
473 flush_icache_range((unsigned long) &frame->tramp[0],
474 (unsigned long) &frame->tramp[2]);
475 }
476
477 return 0;
478}
479
480#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
481
482
483
484
485
486
487
488
489
490static int save_tm_user_regs(struct pt_regs *regs,
491 struct mcontext __user *frame,
492 struct mcontext __user *tm_frame, int sigret)
493{
494 unsigned long msr = regs->msr;
495
496 WARN_ON(tm_suspend_disabled);
497
498
499
500
501
502
503 regs->msr &= ~MSR_TS_MASK;
504
505
506 if (save_general_regs(¤t->thread.ckpt_regs, frame)
507 || save_general_regs(regs, tm_frame))
508 return 1;
509
510
511
512
513
514
515
516 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
517 return 1;
518
519#ifdef CONFIG_ALTIVEC
520
521 if (current->thread.used_vr) {
522 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
523 ELF_NVRREG * sizeof(vector128)))
524 return 1;
525 if (msr & MSR_VEC) {
526 if (__copy_to_user(&tm_frame->mc_vregs,
527 ¤t->thread.vr_state,
528 ELF_NVRREG * sizeof(vector128)))
529 return 1;
530 } else {
531 if (__copy_to_user(&tm_frame->mc_vregs,
532 ¤t->thread.ckvr_state,
533 ELF_NVRREG * sizeof(vector128)))
534 return 1;
535 }
536
537
538
539
540 msr |= MSR_VEC;
541 }
542
543
544
545
546
547
548 if (cpu_has_feature(CPU_FTR_ALTIVEC))
549 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
550 if (__put_user(current->thread.ckvrsave,
551 (u32 __user *)&frame->mc_vregs[32]))
552 return 1;
553 if (msr & MSR_VEC) {
554 if (__put_user(current->thread.vrsave,
555 (u32 __user *)&tm_frame->mc_vregs[32]))
556 return 1;
557 } else {
558 if (__put_user(current->thread.ckvrsave,
559 (u32 __user *)&tm_frame->mc_vregs[32]))
560 return 1;
561 }
562#endif
563
564 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
565 return 1;
566 if (msr & MSR_FP) {
567 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
568 return 1;
569 } else {
570 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
571 return 1;
572 }
573
574#ifdef CONFIG_VSX
575
576
577
578
579
580
581 if (current->thread.used_vsr) {
582 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
583 return 1;
584 if (msr & MSR_VSX) {
585 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
586 current))
587 return 1;
588 } else {
589 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
590 return 1;
591 }
592
593 msr |= MSR_VSX;
594 }
595#endif
596#ifdef CONFIG_SPE
597
598
599
600 if (current->thread.used_spe) {
601 flush_spe_to_thread(current);
602 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
603 ELF_NEVRREG * sizeof(u32)))
604 return 1;
605
606
607 msr |= MSR_SPE;
608 }
609
610
611 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
612 return 1;
613#endif
614
615 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
616 return 1;
617 if (sigret) {
618
619 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
620 || __put_user(PPC_INST_SC, &frame->tramp[1]))
621 return 1;
622 flush_icache_range((unsigned long) &frame->tramp[0],
623 (unsigned long) &frame->tramp[2]);
624 }
625
626 return 0;
627}
628#endif
629
630
631
632
633
634static long restore_user_regs(struct pt_regs *regs,
635 struct mcontext __user *sr, int sig)
636{
637 long err;
638 unsigned int save_r2 = 0;
639 unsigned long msr;
640#ifdef CONFIG_VSX
641 int i;
642#endif
643
644
645
646
647
648 if (!sig)
649 save_r2 = (unsigned int)regs->gpr[2];
650 err = restore_general_regs(regs, sr);
651 regs->trap = 0;
652 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
653 if (!sig)
654 regs->gpr[2] = (unsigned long) save_r2;
655 if (err)
656 return 1;
657
658
659 if (sig)
660 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
661
662#ifdef CONFIG_ALTIVEC
663
664
665
666
667 regs->msr &= ~MSR_VEC;
668 if (msr & MSR_VEC) {
669
670 if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
671 sizeof(sr->mc_vregs)))
672 return 1;
673 current->thread.used_vr = true;
674 } else if (current->thread.used_vr)
675 memset(¤t->thread.vr_state, 0,
676 ELF_NVRREG * sizeof(vector128));
677
678
679 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
680 return 1;
681 if (cpu_has_feature(CPU_FTR_ALTIVEC))
682 mtspr(SPRN_VRSAVE, current->thread.vrsave);
683#endif
684 if (copy_fpr_from_user(current, &sr->mc_fregs))
685 return 1;
686
687#ifdef CONFIG_VSX
688
689
690
691
692 regs->msr &= ~MSR_VSX;
693 if (msr & MSR_VSX) {
694
695
696
697
698 if (copy_vsx_from_user(current, &sr->mc_vsregs))
699 return 1;
700 current->thread.used_vsr = true;
701 } else if (current->thread.used_vsr)
702 for (i = 0; i < 32 ; i++)
703 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
704#endif
705
706
707
708
709 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
710
711#ifdef CONFIG_SPE
712
713
714 regs->msr &= ~MSR_SPE;
715 if (msr & MSR_SPE) {
716
717 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
718 ELF_NEVRREG * sizeof(u32)))
719 return 1;
720 current->thread.used_spe = true;
721 } else if (current->thread.used_spe)
722 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
723
724
725 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
726 return 1;
727#endif
728
729 return 0;
730}
731
732#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
733
734
735
736
737
738static long restore_tm_user_regs(struct pt_regs *regs,
739 struct mcontext __user *sr,
740 struct mcontext __user *tm_sr)
741{
742 long err;
743 unsigned long msr, msr_hi;
744#ifdef CONFIG_VSX
745 int i;
746#endif
747
748 if (tm_suspend_disabled)
749 return 1;
750
751
752
753
754
755
756
757 err = restore_general_regs(regs, tm_sr);
758 err |= restore_general_regs(¤t->thread.ckpt_regs, sr);
759
760 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
761
762 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
763 if (err)
764 return 1;
765
766
767 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
768
769#ifdef CONFIG_ALTIVEC
770 regs->msr &= ~MSR_VEC;
771 if (msr & MSR_VEC) {
772
773 if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
774 sizeof(sr->mc_vregs)) ||
775 __copy_from_user(¤t->thread.vr_state,
776 &tm_sr->mc_vregs,
777 sizeof(sr->mc_vregs)))
778 return 1;
779 current->thread.used_vr = true;
780 } else if (current->thread.used_vr) {
781 memset(¤t->thread.vr_state, 0,
782 ELF_NVRREG * sizeof(vector128));
783 memset(¤t->thread.ckvr_state, 0,
784 ELF_NVRREG * sizeof(vector128));
785 }
786
787
788 if (__get_user(current->thread.ckvrsave,
789 (u32 __user *)&sr->mc_vregs[32]) ||
790 __get_user(current->thread.vrsave,
791 (u32 __user *)&tm_sr->mc_vregs[32]))
792 return 1;
793 if (cpu_has_feature(CPU_FTR_ALTIVEC))
794 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
795#endif
796
797 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
798
799 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
800 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
801 return 1;
802
803#ifdef CONFIG_VSX
804 regs->msr &= ~MSR_VSX;
805 if (msr & MSR_VSX) {
806
807
808
809
810 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
811 copy_ckvsx_from_user(current, &sr->mc_vsregs))
812 return 1;
813 current->thread.used_vsr = true;
814 } else if (current->thread.used_vsr)
815 for (i = 0; i < 32 ; i++) {
816 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
817 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
818 }
819#endif
820
821#ifdef CONFIG_SPE
822
823
824
825 regs->msr &= ~MSR_SPE;
826 if (msr & MSR_SPE) {
827 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
828 ELF_NEVRREG * sizeof(u32)))
829 return 1;
830 current->thread.used_spe = true;
831 } else if (current->thread.used_spe)
832 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
833
834
835 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
836 + ELF_NEVRREG))
837 return 1;
838#endif
839
840
841 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
842 return 1;
843 msr_hi <<= 32;
844
845 if (MSR_TM_RESV(msr_hi))
846 return 1;
847
848
849
850
851
852 preempt_disable();
853
854
855
856
857
858
859
860
861
862
863
864 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
865
866
867
868
869 tm_enable();
870
871 current->thread.tm_texasr |= TEXASR_FS;
872
873 tm_recheckpoint(¤t->thread);
874
875
876 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
877 if (msr & MSR_FP) {
878 load_fp_state(¤t->thread.fp_state);
879 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
880 }
881#ifdef CONFIG_ALTIVEC
882 if (msr & MSR_VEC) {
883 load_vr_state(¤t->thread.vr_state);
884 regs->msr |= MSR_VEC;
885 }
886#endif
887
888 preempt_enable();
889
890 return 0;
891}
892#endif
893
#ifdef CONFIG_PPC64
/* Compat tasks get the 32-bit siginfo layout. */
#define copy_siginfo_to_user copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */
899
900
901
902
903
/*
 * Set up an rt signal frame (32-bit layout) on the user stack and
 * modify @regs so the task enters the handler.  Returns 0 on success,
 * 1 on failure (the caller then raises SIGSEGV).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;	/* tracks the last frame area touched, for the error report */
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Put a Real Time Context onto stack */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack; return either via the vDSO
	 * trampoline or via an inline trampoline written after the frame */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(regs->msr)) {
		/* link the transactional ucontext so sigreturn finds it */
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in the kernel's endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
996
/*
 * Switch the blocked-signal mask and registers to those described by
 * @ucp.  @sig is non-zero on the signal-return path, in which case
 * restore_user_regs() also honours the saved MSR_LE bit.  Returns 0
 * or -EFAULT.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no explicit access_ok(mcp) here -- cmcp is a u32, so the
		 * pointer is below 4GB (NOTE(review): relies on compat
		 * address-space limits) */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
1025
1026#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * TM variant of do_setcontext(): restores both the checkpointed
 * context (@ucp) and the transactional context (@tm_ucp) and
 * recheckpoints.  Returns 0 or -EFAULT.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	/* uc_regs are 32-bit user pointers stored as u32s */
	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
1053#endif
1054
1055#ifdef CONFIG_PPC64
1056COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1057 struct ucontext __user *, new_ctx, int, ctx_size)
1058#else
1059SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1060 struct ucontext __user *, new_ctx, long, ctx_size)
1061#endif
1062{
1063 struct pt_regs *regs = current_pt_regs();
1064 int ctx_has_vsx_region = 0;
1065
1066#ifdef CONFIG_PPC64
1067 unsigned long new_msr = 0;
1068
1069 if (new_ctx) {
1070 struct mcontext __user *mcp;
1071 u32 cmcp;
1072
1073
1074
1075
1076
1077
1078 if (__get_user(cmcp, &new_ctx->uc_regs))
1079 return -EFAULT;
1080 mcp = (struct mcontext __user *)(u64)cmcp;
1081 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1082 return -EFAULT;
1083 }
1084
1085
1086
1087
1088 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1089 return -EINVAL;
1090
1091
1092
1093
1094 if ((ctx_size < sizeof(struct ucontext)) &&
1095 (new_msr & MSR_VSX))
1096 return -EINVAL;
1097
1098 if (ctx_size >= sizeof(struct ucontext))
1099 ctx_has_vsx_region = 1;
1100#else
1101
1102
1103
1104 if (ctx_size < sizeof(struct ucontext))
1105 return -EINVAL;
1106#endif
1107 if (old_ctx != NULL) {
1108 struct mcontext __user *mctx;
1109
1110
1111
1112
1113
1114
1115
1116
1117 mctx = (struct mcontext __user *)
1118 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1119 if (!access_ok(old_ctx, ctx_size)
1120 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
1121 || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
1122 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
1123 return -EFAULT;
1124 }
1125 if (new_ctx == NULL)
1126 return 0;
1127 if (!access_ok(new_ctx, ctx_size) ||
1128 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
1129 return -EFAULT;
1130
1131
1132
1133
1134
1135
1136
1137
1138
1139
1140
1141
1142 if (do_setcontext(new_ctx, regs, 0))
1143 do_exit(SIGSEGV);
1144
1145 set_thread_flag(TIF_RESTOREALL);
1146 return 0;
1147}
1148
1149#ifdef CONFIG_PPC64
/*
 * Return from an rt signal handler: restore the saved context (and,
 * for TM, both checkpointed and transactional state) plus the
 * alternate signal stack.  A bad frame kills the task with SIGSEGV.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a suspended transaction, reclaim (discard) it:
	 * sigreturn destroys all traces of the signal frame, including
	 * any transactional state created within it.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top half of the MSR was stashed in the 32-bit MSR
		 * slot of the transactional frame by save_tm_user_regs() */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non-TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* Only recheckpoint on return when a transaction
			 * was actually active */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Clear the TS bits since no recheckpoint was done;
		 * leaving them set would trap on return to user space.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through to the non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * Restore the sigaltstack setting saved at delivery time; other
	 * architectures do the same and this has always been the
	 * behaviour here.
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}
1254
1255#ifdef CONFIG_PPC32
/*
 * debug_setcontext(2) (32-bit only): apply a list of debug operations
 * (single-step / branch-trace enables) and then switch to @ctx.
 * Note the MSR/dbcr0 updates below are applied before the context is
 * validated, so a subsequent failure still leaves them changed.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
		 int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* only drop MSR_DE/IDM when no other debug
				 * events remain armed */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/*
	 * Commit the debug-related MSR (and dbcr0) changes.  The
	 * context restore below overwrites regs, so these must be in
	 * place first; from here on the syscall is not restartable.
	 */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If do_setcontext() faults, the registers may be partially
	 * updated and cannot simply be rolled back; kill the task with
	 * SIGSEGV (matching do_setcontext's other failure paths) but
	 * still return 0, as the signal is fatal anyway.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * Restore the sigaltstack setting saved with the context, as
	 * the other context-restore paths here do.
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
1358
1359
1360
1361
/*
 * Set up an "old-style" (non-rt) signal frame on the user stack and
 * modify @regs so the task enters the handler.  Returns 0 on success,
 * 1 on failure (the caller then raises SIGSEGV).
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		    struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	/* The old sigcontext stores the 64-bit mask split across
	 * oldmask and _unused[3] */
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	/* Return either via the vDSO trampoline or an inline one */
	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(regs->msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}
1444
1445
1446
1447
1448#ifdef CONFIG_PPC64
/*
 * Return from an "old-style" signal handler: restore the blocked mask
 * and register state from the sigframe.  A bad frame kills the task
 * with SIGSEGV.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;	/* tracks the last frame area touched, for the error report */
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Reassemble the 64-bit signal mask from the two 32-bit halves
	 * stored in oldmask and _unused[3] (see handle_signal32()).
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	/* the transactional frame's MSR slot holds the MSR top half */
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}
1521