1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/kernel.h>
20#include <linux/signal.h>
21#include <linux/errno.h>
22#include <linux/elf.h>
23#include <linux/ptrace.h>
24#include <linux/pagemap.h>
25#include <linux/ratelimit.h>
26#include <linux/syscalls.h>
27#ifdef CONFIG_PPC64
28#include <linux/compat.h>
29#else
30#include <linux/wait.h>
31#include <linux/unistd.h>
32#include <linux/stddef.h>
33#include <linux/tty.h>
34#include <linux/binfmts.h>
35#endif
36
37#include <linux/uaccess.h>
38#include <asm/cacheflush.h>
39#include <asm/syscalls.h>
40#include <asm/sigcontext.h>
41#include <asm/vdso.h>
42#include <asm/switch_to.h>
43#include <asm/tm.h>
44#include <asm/asm-prototypes.h>
45#ifdef CONFIG_PPC64
46#include "ppc32.h"
47#include <asm/unistd.h>
48#else
49#include <asm/ucontext.h>
50#endif
51
52#include "signal.h"
53
54
55#ifdef CONFIG_PPC64
56#define old_sigaction old_sigaction32
57#define sigcontext sigcontext32
58#define mcontext mcontext32
59#define ucontext ucontext32
60
61#define __save_altstack __compat_save_altstack
62
63
64
65
66
67#define UCONTEXTSIZEWITHOUTVSX \
68 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
69
70
71
72
73
74
75
76
77#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
78#undef __SIGNAL_FRAMESIZE
79#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
80#undef ELF_NVRREG
81#define ELF_NVRREG ELF_NVRREG32
82
83
84
85
86
/*
 * Copy a kernel sigset_t out to a 32-bit (compat) userspace sigset.
 * Returns 0 on success or -EFAULT on a userspace access fault.
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
	return put_compat_sigset(uset, set, sizeof(*uset));
}
91
/*
 * Read a 32-bit (compat) userspace sigset into a kernel sigset_t.
 * Returns 0 on success or -EFAULT on a userspace access fault.
 */
static inline int get_sigset_t(sigset_t *set,
			       const compat_sigset_t __user *uset)
{
	return get_compat_sigset(set, uset);
}
97
98#define to_user_ptr(p) ptr_to_compat(p)
99#define from_user_ptr(p) compat_ptr(p)
100
/*
 * Save the 64-bit pt_regs of a compat (32-bit) task into the 32-bit
 * mcontext GPR array, truncating each register to 32 bits.
 * Returns 0 on success or -EFAULT on a userspace store fault.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;
	/* Force usr to always see softe as 1 (interrupts enabled) */
	elf_greg_t64 softe = 0x1;

	WARN_ON(!FULL_REGS(regs));

	for (i = 0; i <= PT_RESULT; i ++) {
		/* If the volatile regs weren't saved, skip r14-r31
		 * (only reachable if the WARN_ON above fired). */
		if (i == 14 && !FULL_REGS(regs))
			i = 32;
		if ( i == PT_SOFTE) {
			/* don't leak the kernel's real soft-enable state */
			if(__put_user((unsigned int)softe, &frame->mc_gregs[i]))
				return -EFAULT;
			else
				continue;
		}
		if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
125
/*
 * Restore the GPRs of a compat (32-bit) task from a 32-bit mcontext,
 * zero-extending each register to 64 bits.  MSR and SOFTE are deliberately
 * skipped: userspace must not be able to set privileged MSR bits or the
 * kernel's interrupt soft-enable state.
 * Returns 0 on success or -EFAULT on a userspace load fault.
 */
static inline int restore_general_regs(struct pt_regs *regs,
		struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		if (__get_user(gregs[i], &sr->mc_gregs[i]))
			return -EFAULT;
	}
	return 0;
}
140
141#else
142
143#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
144
/*
 * Native 32-bit variant: copy a sigset_t straight out to userspace.
 * Returns nonzero (number of bytes not copied) on fault.
 */
static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
	return copy_to_user(uset, set, sizeof(*uset));
}
149
/*
 * Native 32-bit variant: copy a sigset_t straight in from userspace.
 * Returns nonzero (number of bytes not copied) on fault.
 */
static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
	return copy_from_user(set, uset, sizeof(*uset));
}
154
155#define to_user_ptr(p) ((unsigned long)(p))
156#define from_user_ptr(p) ((void __user *)(p))
157
/*
 * Native 32-bit variant: pt_regs and the user's elf_gregset_t have the
 * same layout, so the GPRs can be copied out in one block.
 */
static inline int save_general_regs(struct pt_regs *regs,
		struct mcontext __user *frame)
{
	WARN_ON(!FULL_REGS(regs));
	return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}
164
165static inline int restore_general_regs(struct pt_regs *regs,
166 struct mcontext __user *sr)
167{
168
169 if (__copy_from_user(regs, &sr->mc_gregs,
170 PT_MSR * sizeof(elf_greg_t)))
171 return -EFAULT;
172
173 if (__copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
174 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
175 return -EFAULT;
176 return 0;
177}
178#endif
179
180
181
182
183
184
185
186
187
188
189
190
191
/*
 * Layout of the non-RT signal frame we build on the user stack.
 * For TM, a second sigcontext/mcontext pair holds the transactional
 * register state alongside the checkpointed state.
 */
struct sigframe {
	struct sigcontext sctx;		/* the sigcontext */
	struct mcontext mctx;		/* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext mctx_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int abigap[56];
};
205
206
207#define tramp mc_pad
208
209
210
211
212
213
214
215
216
217
218
219
/*
 * Layout of the RT signal frame: siginfo plus a full ucontext.
 * On PPC64 this is the compat (32-bit) siginfo layout.  For TM, a second
 * ucontext holds the transactional register state; uc.uc_link points at
 * it so userspace (and sigreturn) can find it.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	/*
	 * Programs using the rs6000/xcoff abi can save up to 19 gp
	 * regs and 18 fp regs below sp before decrementing it.
	 */
	int abigap[56];
};
236
237
238
239
240
241
242static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
243 struct mcontext __user *tm_frame, int sigret,
244 int ctx_has_vsx_region)
245{
246 unsigned long msr = regs->msr;
247
248
249 flush_fp_to_thread(current);
250
251
252 if (save_general_regs(regs, frame))
253 return 1;
254
255#ifdef CONFIG_ALTIVEC
256
257 if (current->thread.used_vr) {
258 flush_altivec_to_thread(current);
259 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
260 ELF_NVRREG * sizeof(vector128)))
261 return 1;
262
263
264 msr |= MSR_VEC;
265 }
266
267
268
269
270
271
272
273
274 if (cpu_has_feature(CPU_FTR_ALTIVEC))
275 current->thread.vrsave = mfspr(SPRN_VRSAVE);
276 if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
277 return 1;
278#endif
279 if (copy_fpr_to_user(&frame->mc_fregs, current))
280 return 1;
281
282
283
284
285
286 msr &= ~MSR_VSX;
287#ifdef CONFIG_VSX
288
289
290
291
292
293
294 if (current->thread.used_vsr && ctx_has_vsx_region) {
295 flush_vsx_to_thread(current);
296 if (copy_vsx_to_user(&frame->mc_vsregs, current))
297 return 1;
298 msr |= MSR_VSX;
299 }
300#endif
301#ifdef CONFIG_SPE
302
303 if (current->thread.used_spe) {
304 flush_spe_to_thread(current);
305 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
306 ELF_NEVRREG * sizeof(u32)))
307 return 1;
308
309
310 msr |= MSR_SPE;
311 }
312
313
314
315 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
316 return 1;
317#endif
318
319 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
320 return 1;
321
322
323
324 if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
325 return 1;
326
327 if (sigret) {
328
329 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
330 || __put_user(PPC_INST_SC, &frame->tramp[1]))
331 return 1;
332 flush_icache_range((unsigned long) &frame->tramp[0],
333 (unsigned long) &frame->tramp[2]);
334 }
335
336 return 0;
337}
338
339#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
340
341
342
343
344
345
346
347
348
349static int save_tm_user_regs(struct pt_regs *regs,
350 struct mcontext __user *frame,
351 struct mcontext __user *tm_frame, int sigret,
352 unsigned long msr)
353{
354 WARN_ON(tm_suspend_disabled);
355
356
357 if (save_general_regs(¤t->thread.ckpt_regs, frame)
358 || save_general_regs(regs, tm_frame))
359 return 1;
360
361
362
363
364
365
366
367 if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
368 return 1;
369
370#ifdef CONFIG_ALTIVEC
371
372 if (current->thread.used_vr) {
373 if (__copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
374 ELF_NVRREG * sizeof(vector128)))
375 return 1;
376 if (msr & MSR_VEC) {
377 if (__copy_to_user(&tm_frame->mc_vregs,
378 ¤t->thread.vr_state,
379 ELF_NVRREG * sizeof(vector128)))
380 return 1;
381 } else {
382 if (__copy_to_user(&tm_frame->mc_vregs,
383 ¤t->thread.ckvr_state,
384 ELF_NVRREG * sizeof(vector128)))
385 return 1;
386 }
387
388
389
390
391 msr |= MSR_VEC;
392 }
393
394
395
396
397
398
399 if (cpu_has_feature(CPU_FTR_ALTIVEC))
400 current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
401 if (__put_user(current->thread.ckvrsave,
402 (u32 __user *)&frame->mc_vregs[32]))
403 return 1;
404 if (msr & MSR_VEC) {
405 if (__put_user(current->thread.vrsave,
406 (u32 __user *)&tm_frame->mc_vregs[32]))
407 return 1;
408 } else {
409 if (__put_user(current->thread.ckvrsave,
410 (u32 __user *)&tm_frame->mc_vregs[32]))
411 return 1;
412 }
413#endif
414
415 if (copy_ckfpr_to_user(&frame->mc_fregs, current))
416 return 1;
417 if (msr & MSR_FP) {
418 if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
419 return 1;
420 } else {
421 if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
422 return 1;
423 }
424
425#ifdef CONFIG_VSX
426
427
428
429
430
431
432 if (current->thread.used_vsr) {
433 if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
434 return 1;
435 if (msr & MSR_VSX) {
436 if (copy_vsx_to_user(&tm_frame->mc_vsregs,
437 current))
438 return 1;
439 } else {
440 if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
441 return 1;
442 }
443
444 msr |= MSR_VSX;
445 }
446#endif
447#ifdef CONFIG_SPE
448
449
450
451 if (current->thread.used_spe) {
452 flush_spe_to_thread(current);
453 if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
454 ELF_NEVRREG * sizeof(u32)))
455 return 1;
456
457
458 msr |= MSR_SPE;
459 }
460
461
462 if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
463 return 1;
464#endif
465
466 if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
467 return 1;
468 if (sigret) {
469
470 if (__put_user(PPC_INST_ADDI + sigret, &frame->tramp[0])
471 || __put_user(PPC_INST_SC, &frame->tramp[1]))
472 return 1;
473 flush_icache_range((unsigned long) &frame->tramp[0],
474 (unsigned long) &frame->tramp[2]);
475 }
476
477 return 0;
478}
479#endif
480
481
482
483
484
485static long restore_user_regs(struct pt_regs *regs,
486 struct mcontext __user *sr, int sig)
487{
488 long err;
489 unsigned int save_r2 = 0;
490 unsigned long msr;
491#ifdef CONFIG_VSX
492 int i;
493#endif
494
495
496
497
498
499 if (!sig)
500 save_r2 = (unsigned int)regs->gpr[2];
501 err = restore_general_regs(regs, sr);
502 set_trap_norestart(regs);
503 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
504 if (!sig)
505 regs->gpr[2] = (unsigned long) save_r2;
506 if (err)
507 return 1;
508
509
510 if (sig)
511 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
512
513#ifdef CONFIG_ALTIVEC
514
515
516
517
518 regs->msr &= ~MSR_VEC;
519 if (msr & MSR_VEC) {
520
521 if (__copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
522 sizeof(sr->mc_vregs)))
523 return 1;
524 current->thread.used_vr = true;
525 } else if (current->thread.used_vr)
526 memset(¤t->thread.vr_state, 0,
527 ELF_NVRREG * sizeof(vector128));
528
529
530 if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
531 return 1;
532 if (cpu_has_feature(CPU_FTR_ALTIVEC))
533 mtspr(SPRN_VRSAVE, current->thread.vrsave);
534#endif
535 if (copy_fpr_from_user(current, &sr->mc_fregs))
536 return 1;
537
538#ifdef CONFIG_VSX
539
540
541
542
543 regs->msr &= ~MSR_VSX;
544 if (msr & MSR_VSX) {
545
546
547
548
549 if (copy_vsx_from_user(current, &sr->mc_vsregs))
550 return 1;
551 current->thread.used_vsr = true;
552 } else if (current->thread.used_vsr)
553 for (i = 0; i < 32 ; i++)
554 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
555#endif
556
557
558
559
560 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
561
562#ifdef CONFIG_SPE
563
564
565 regs->msr &= ~MSR_SPE;
566 if (msr & MSR_SPE) {
567
568 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
569 ELF_NEVRREG * sizeof(u32)))
570 return 1;
571 current->thread.used_spe = true;
572 } else if (current->thread.used_spe)
573 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
574
575
576 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
577 return 1;
578#endif
579
580 return 0;
581}
582
583#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
584
585
586
587
588
589static long restore_tm_user_regs(struct pt_regs *regs,
590 struct mcontext __user *sr,
591 struct mcontext __user *tm_sr)
592{
593 long err;
594 unsigned long msr, msr_hi;
595#ifdef CONFIG_VSX
596 int i;
597#endif
598
599 if (tm_suspend_disabled)
600 return 1;
601
602
603
604
605
606
607
608 err = restore_general_regs(regs, tm_sr);
609 err |= restore_general_regs(¤t->thread.ckpt_regs, sr);
610
611 err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);
612
613 err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
614 if (err)
615 return 1;
616
617
618 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
619
620#ifdef CONFIG_ALTIVEC
621 regs->msr &= ~MSR_VEC;
622 if (msr & MSR_VEC) {
623
624 if (__copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
625 sizeof(sr->mc_vregs)) ||
626 __copy_from_user(¤t->thread.vr_state,
627 &tm_sr->mc_vregs,
628 sizeof(sr->mc_vregs)))
629 return 1;
630 current->thread.used_vr = true;
631 } else if (current->thread.used_vr) {
632 memset(¤t->thread.vr_state, 0,
633 ELF_NVRREG * sizeof(vector128));
634 memset(¤t->thread.ckvr_state, 0,
635 ELF_NVRREG * sizeof(vector128));
636 }
637
638
639 if (__get_user(current->thread.ckvrsave,
640 (u32 __user *)&sr->mc_vregs[32]) ||
641 __get_user(current->thread.vrsave,
642 (u32 __user *)&tm_sr->mc_vregs[32]))
643 return 1;
644 if (cpu_has_feature(CPU_FTR_ALTIVEC))
645 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
646#endif
647
648 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
649
650 if (copy_fpr_from_user(current, &sr->mc_fregs) ||
651 copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
652 return 1;
653
654#ifdef CONFIG_VSX
655 regs->msr &= ~MSR_VSX;
656 if (msr & MSR_VSX) {
657
658
659
660
661 if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
662 copy_ckvsx_from_user(current, &sr->mc_vsregs))
663 return 1;
664 current->thread.used_vsr = true;
665 } else if (current->thread.used_vsr)
666 for (i = 0; i < 32 ; i++) {
667 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
668 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
669 }
670#endif
671
672#ifdef CONFIG_SPE
673
674
675
676 regs->msr &= ~MSR_SPE;
677 if (msr & MSR_SPE) {
678 if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
679 ELF_NEVRREG * sizeof(u32)))
680 return 1;
681 current->thread.used_spe = true;
682 } else if (current->thread.used_spe)
683 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
684
685
686 if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
687 + ELF_NEVRREG))
688 return 1;
689#endif
690
691
692 if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
693 return 1;
694 msr_hi <<= 32;
695
696 if (MSR_TM_RESV(msr_hi))
697 return 1;
698
699
700
701
702
703 preempt_disable();
704
705
706
707
708
709
710
711
712
713
714
715 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
716
717
718
719
720 tm_enable();
721
722 current->thread.tm_texasr |= TEXASR_FS;
723
724 tm_recheckpoint(¤t->thread);
725
726
727 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
728 if (msr & MSR_FP) {
729 load_fp_state(¤t->thread.fp_state);
730 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
731 }
732#ifdef CONFIG_ALTIVEC
733 if (msr & MSR_VEC) {
734 load_vr_state(¤t->thread.vr_state);
735 regs->msr |= MSR_VEC;
736 }
737#endif
738
739 preempt_enable();
740
741 return 0;
742}
743#endif
744
745#ifdef CONFIG_PPC64
746
747#define copy_siginfo_to_user copy_siginfo_to_user32
748
749#endif
750
751
752
753
754
/*
 * Set up an RT signal frame on the user stack and arrange for the task
 * to enter the handler: builds siginfo + ucontext (and, under TM, a
 * second ucontext with transactional state), installs the sigreturn
 * trampoline (VDSO if available, otherwise in-frame), and rewrites
 * regs so the handler runs with (sig, &info, &uc) in r3-r5.
 * Returns 0 on success, 1 on a bad/unwritable frame (caller raises
 * SIGSEGV).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *rt_sf;
	struct mcontext __user *frame;
	struct mcontext __user *tm_frame = NULL;
	void __user *addr;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
	addr = rt_sf;
	if (unlikely(rt_sf == NULL))
		goto badframe;

	/* Put the siginfo & fill in most of the ucontext */
	if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
	    || __put_user(0, &rt_sf->uc.uc_flags)
	    || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
	    || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
		    &rt_sf->uc.uc_regs)
	    || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
		goto badframe;

	/* Save user registers on the stack */
	frame = &rt_sf->uc.uc_mcontext;
	addr = frame;
	if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
	} else {
		sigret = __NR_rt_sigreturn;
		tramp = (unsigned long) frame->tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_frame = &rt_sf->uc_transact.uc_mcontext;
	if (MSR_TM_ACTIVE(msr)) {
		/* link the two ucontexts so sigreturn finds the TM state */
		if (__put_user((unsigned long)&rt_sf->uc_transact,
			       &rt_sf->uc.uc_link) ||
		    __put_user((unsigned long)tm_frame,
			       &rt_sf->uc_transact.uc_regs))
			goto badframe;
		if (save_tm_user_regs(regs, frame, tm_frame, sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (__put_user(0, &rt_sf->uc.uc_link))
			goto badframe;
		if (save_user_regs(regs, frame, tm_frame, sigret, 1))
			goto badframe;
	}
	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
	addr = (void __user *)regs->gpr[1];
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) &rt_sf->info;
	regs->gpr[5] = (unsigned long) &rt_sf->uc;
	regs->gpr[6] = (unsigned long) rt_sf;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the signal handler in native-endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_rt_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   addr, regs->nip, regs->link);

	return 1;
}
851
/*
 * Apply a (non-TM) ucontext: set the blocked signal mask and restore
 * the user registers from the mcontext that uc_regs points at.
 * 'sig' is passed through to restore_user_regs() (nonzero for a signal
 * return).  Returns 0 on success or -EFAULT.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;
#ifdef CONFIG_PPC64
	{
		u32 cmcp;

		if (__get_user(cmcp, &ucp->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* no need to check access_ok(mcp): a 32-bit pointer is
		 * always below TASK_SIZE of a 64-bit kernel */
	}
#else
	if (__get_user(mcp, &ucp->uc_regs))
		return -EFAULT;
	if (!access_ok(mcp, sizeof(*mcp)))
		return -EFAULT;
#endif
	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;
}
880
881#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Apply a pair of ucontexts when returning into an active transaction:
 * 'ucp' carries the checkpointed state, 'tm_ucp' the transactional
 * state.  Sets the blocked signal mask, then recheckpoints via
 * restore_tm_user_regs().  Returns 0 on success or -EFAULT.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (get_sigset_t(&set, &ucp->uc_sigmask))
		return -EFAULT;

	if (__get_user(cmcp, &ucp->uc_regs) ||
	    __get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
	/* no need to check access_ok(mcp): 32-bit pointers are in range */

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;
}
908#endif
909
910#ifdef CONFIG_PPC64
911COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
912 struct ucontext __user *, new_ctx, int, ctx_size)
913#else
914SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
915 struct ucontext __user *, new_ctx, long, ctx_size)
916#endif
917{
918 struct pt_regs *regs = current_pt_regs();
919 int ctx_has_vsx_region = 0;
920
921#ifdef CONFIG_PPC64
922 unsigned long new_msr = 0;
923
924 if (new_ctx) {
925 struct mcontext __user *mcp;
926 u32 cmcp;
927
928
929
930
931
932
933 if (__get_user(cmcp, &new_ctx->uc_regs))
934 return -EFAULT;
935 mcp = (struct mcontext __user *)(u64)cmcp;
936 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
937 return -EFAULT;
938 }
939
940
941
942
943 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
944 return -EINVAL;
945
946
947
948
949 if ((ctx_size < sizeof(struct ucontext)) &&
950 (new_msr & MSR_VSX))
951 return -EINVAL;
952
953 if (ctx_size >= sizeof(struct ucontext))
954 ctx_has_vsx_region = 1;
955#else
956
957
958
959 if (ctx_size < sizeof(struct ucontext))
960 return -EINVAL;
961#endif
962 if (old_ctx != NULL) {
963 struct mcontext __user *mctx;
964
965
966
967
968
969
970
971
972 mctx = (struct mcontext __user *)
973 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
974 if (!access_ok(old_ctx, ctx_size)
975 || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
976 || put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked)
977 || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
978 return -EFAULT;
979 }
980 if (new_ctx == NULL)
981 return 0;
982 if (!access_ok(new_ctx, ctx_size) ||
983 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
984 return -EFAULT;
985
986
987
988
989
990
991
992
993
994
995
996
997 if (do_setcontext(new_ctx, regs, 0))
998 do_exit(SIGSEGV);
999
1000 set_thread_flag(TIF_RESTOREALL);
1001 return 0;
1002}
1003
1004#ifdef CONFIG_PPC64
/*
 * rt_sigreturn syscall: tear down the RT signal frame built by
 * handle_rt_signal32() and restore the interrupted context.  If
 * uc.uc_link is set and the saved MSR shows an active transaction,
 * restore both the checkpointed and transactional state.  On any
 * bad frame the task gets SIGSEGV.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * If there is a transactional state then throw it away.
	 * The purpose of a sigreturn is to destroy all traces of the
	 * signal frame, this includes any transactional state created
	 * within in.  We only check for suspended as we can never be
	 * active in the kernel, we are active, there is nothing better to
	 * do than go ahead and Bad Thing later.
	 * The cause is not important as there will never be a
	 * recheckpoint so it's not user visible.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the 32-bit
		 * MSR word of the transactional mcontext */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on non TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;
			/* We only recheckpoint on return if we're
			 * transaction.
			 */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Unset regs->msr because ucontext MSR TS is not
		 * set, and recheckpoint was not called. This avoid
		 * hitting a TM Bad thing at RFID
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through, for non-TM restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_rt_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   rt_sf, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}
1109
1110#ifdef CONFIG_PPC32
/*
 * debug_setcontext syscall (32-bit only): apply a list of debug
 * operations (single-stepping / branch tracing) and then restore the
 * supplied ucontext.  The debug ops are validated and applied before
 * the context switch so a failing op returns cleanly; a failing
 * context restore kills the task with SIGSEGV.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
		       int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* only drop MSR_DE/IDM if no other debug
				 * events remain armed */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/* We wait until here to actually install the values in the
	   registers so if we fail in the above loop, it will not
	   affect the contents of these registers.  After this point,
	   failure is a problem, anyway, and it's very unlikely unless
	   the user is really doing something wrong. */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If we get a fault copying the context into the kernel's
	 * image of the user's registers, we can't just return -EFAULT
	 * because the user's registers will be corrupted.  For instance
	 * the NIP value may have been updated but not some of the
	 * other registers.  Given that we have done the access_ok
	 * and successfully read the first and last bytes of the region
	 * above, this should only happen in an out-of-memory situation
	 * or if another thread unmaps the region containing the context.
	 * We kill the task with a SIGSEGV in this situation.
	 */
	if (do_setcontext(ctx, regs, 1)) {
		if (show_unhandled_signals)
			printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
					   "sys_debug_setcontext: %p nip %08lx "
					   "lr %08lx\n",
					   current->comm, current->pid,
					   ctx, regs->nip, regs->link);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * It's not clear whether or why it is desirable to save the
	 * sigaltstack setting on signal delivery and restore it on
	 * signal return.  But other architectures do this and we have
	 * always done it up until now so it is probably better not to
	 * change it.  -- paulus
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
1213
1214
1215
1216
/*
 * Set up a non-RT (old-style) signal frame on the user stack: a
 * sigcontext + mcontext pair (plus a TM pair when a transaction is
 * active), the old signal mask stashed in sigcontext, and the
 * sigreturn trampoline (VDSO if available, otherwise in-frame).
 * Rewrites regs so the handler runs with (sig, &sc) in r3-r4.
 * Returns 0 on success, 1 on a bad/unwritable frame (caller raises
 * SIGSEGV).
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	int sigret;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* Save the thread's msr before get_tm_stackpointer() changes it */
	unsigned long msr = regs->msr;
#endif

	BUG_ON(tsk != current);

	/* Set up Signal Frame */
	frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
	if (unlikely(frame == NULL))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
	    || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
	    || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
	    || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
	    || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
	    || __put_user(ksig->sig, &sc->signal))
		goto badframe;

	if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
		sigret = 0;
		tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
	} else {
		sigret = __NR_sigreturn;
		tramp = (unsigned long) frame->mctx.tramp;
	}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
	if (MSR_TM_ACTIVE(msr)) {
		if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
				      sigret, msr))
			goto badframe;
	}
	else
#endif
	{
		if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
			goto badframe;
	}

	regs->link = tramp;

	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long) (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the signal handler in big-endian mode */
	regs->msr &= ~MSR_LE;
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in handle_signal32: "
				   "%p nip %08lx lr %08lx\n",
				   tsk->comm, tsk->pid,
				   frame, regs->nip, regs->link);

	return 1;
}
1303
1304
1305
1306
1307#ifdef CONFIG_PPC64
/*
 * sigreturn syscall: tear down the non-RT signal frame built by
 * handle_signal32(), restore the blocked signal mask saved in the
 * sigcontext, and restore the interrupted register state (the TM
 * pair when the saved MSR's top half shows an active transaction).
 * On any bad frame the task gets SIGSEGV.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	void __user *addr;
	sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct mcontext __user *mcp, *tm_mcp;
	unsigned long msr_hi;
#endif

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	addr = sc;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Note that PPC32 puts the upper 32 bits of the sigmask in the
	 * unused part of the signal stackframe
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	mcp = (struct mcontext __user *)&sf->mctx;
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else
#endif
	{
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		addr = sr;
		if (!access_ok(sr, sizeof(*sr))
		    || restore_user_regs(regs, sr, 1))
			goto badframe;
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	if (show_unhandled_signals)
		printk_ratelimited(KERN_INFO
				   "%s[%d]: bad frame in sys_sigreturn: "
				   "%p nip %08lx lr %08lx\n",
				   current->comm, current->pid,
				   addr, regs->nip, regs->link);

	force_sig(SIGSEGV);
	return 0;
}
1380