1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16#include <linux/sched.h>
17#include <linux/mm.h>
18#include <linux/smp.h>
19#include <linux/kernel.h>
20#include <linux/signal.h>
21#include <linux/errno.h>
22#include <linux/elf.h>
23#include <linux/ptrace.h>
24#include <linux/pagemap.h>
25#include <linux/ratelimit.h>
26#include <linux/syscalls.h>
27#ifdef CONFIG_PPC64
28#include <linux/compat.h>
29#else
30#include <linux/wait.h>
31#include <linux/unistd.h>
32#include <linux/stddef.h>
33#include <linux/tty.h>
34#include <linux/binfmts.h>
35#endif
36
37#include <linux/uaccess.h>
38#include <asm/cacheflush.h>
39#include <asm/syscalls.h>
40#include <asm/sigcontext.h>
41#include <asm/vdso.h>
42#include <asm/switch_to.h>
43#include <asm/tm.h>
44#include <asm/asm-prototypes.h>
45#ifdef CONFIG_PPC64
46#include "ppc32.h"
47#include <asm/unistd.h>
48#else
49#include <asm/ucontext.h>
50#endif
51
52#include "signal.h"
53
54
/*
 * When this file is built into a 64-bit kernel (CONFIG_PPC64) it implements
 * the 32-bit compat signal path, so alias the structure and helper names to
 * their 32-bit variants.
 */
#ifdef CONFIG_PPC64
#define old_sigaction old_sigaction32
#define sigcontext sigcontext32
#define mcontext mcontext32
#define ucontext ucontext32

/* Size of a 32-bit ucontext without the trailing VSX register half. */
#define UCONTEXTSIZEWITHOUTVSX \
 (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))

/* Amount of general-register state copied to/from mc_gregs. */
#define GP_REGS_SIZE min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE __SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG ELF_NVRREG32

/* 32-bit compat tasks use the compat sigset layout. */
#define unsafe_put_sigset_t unsafe_put_compat_sigset
#define unsafe_get_sigset_t unsafe_get_compat_sigset

/* Convert between kernel pointers and 32-bit user pointers. */
#define to_user_ptr(p) ptr_to_compat(p)
#define from_user_ptr(p) compat_ptr(p)
90
/*
 * Copy the 32-bit view of the general registers out to a user signal frame
 * (PPC64 compat path).  Each 64-bit pt_regs slot is narrowed to 32 bits.
 * Must be called inside a user_access_begin() section.
 * Returns 0 on success, 1 on a faulting user access.
 */
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int val, i;

	for (i = 0; i <= PT_RESULT; i ++) {
		/* Userspace always sees SOFTE (soft-enable) as 1. */
		if (i == PT_SOFTE)
			val = 1;
		else
			val = gregs[i];

		unsafe_put_user(val, &frame->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}
111
/*
 * Copy the general registers back from a user signal frame into pt_regs
 * (PPC64 compat path), widening each 32-bit slot.  MSR and SOFTE are
 * deliberately skipped: userspace must not control them directly.
 * Must be called inside a user_access_begin() section.
 * Returns 0 on success, 1 on a faulting user access.
 */
static __always_inline int
__unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
{
	elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
	int i;

	for (i = 0; i <= PT_RESULT; i++) {
		if ((i == PT_MSR) || (i == PT_SOFTE))
			continue;
		unsafe_get_user(gregs[i], &sr->mc_gregs[i], failed);
	}
	return 0;

failed:
	return 1;
}
128
129#else
130
/* Native 32-bit kernel: use the native sigset and pointer representations. */
#define GP_REGS_SIZE min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

#define unsafe_put_sigset_t(uset, set, label) do { \
	sigset_t __user *__us = uset ; \
	const sigset_t *__s = set; \
	\
	unsafe_copy_to_user(__us, __s, sizeof(*__us), label); \
} while (0)

#define unsafe_get_sigset_t unsafe_get_user_sigset

#define to_user_ptr(p) ((unsigned long)(p))
#define from_user_ptr(p) ((void __user *)(p))
144
/*
 * Copy the general registers out to a user signal frame (native 32-bit
 * path) with a single bulk copy.  Must be called inside a
 * user_access_begin() section.  Returns 0 on success, 1 on fault.
 */
static __always_inline int
__unsafe_save_general_regs(struct pt_regs *regs, struct mcontext __user *frame)
{
	unsafe_copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE, failed);
	return 0;

failed:
	return 1;
}
154
155static __always_inline
156int __unsafe_restore_general_regs(struct pt_regs *regs, struct mcontext __user *sr)
157{
158
159 unsafe_copy_from_user(regs, &sr->mc_gregs, PT_MSR * sizeof(elf_greg_t), failed);
160
161
162 unsafe_copy_from_user(®s->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
163 GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t), failed);
164
165 return 0;
166
167failed:
168 return 1;
169}
170#endif
171
/* goto-label wrappers so callers inside user_access sections can bail out */
#define unsafe_save_general_regs(regs, frame, label) do { \
	if (__unsafe_save_general_regs(regs, frame)) \
		goto label; \
} while (0)

#define unsafe_restore_general_regs(regs, frame, label) do { \
	if (__unsafe_restore_general_regs(regs, frame)) \
		goto label; \
} while (0)
181
182
183
184
185
186
187
188
189
190
191
192
193
/*
 * Layout of the frame placed on the user stack for a non-RT signal.
 * With TM configured, a second sigcontext/mcontext pair holds the
 * transactional register state.
 */
struct sigframe {
	struct sigcontext sctx;
	struct mcontext mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct sigcontext sctx_transact;
	struct mcontext mctx_transact;
#endif
	/*
	 * Gap below the frame for the handler's callees; presumably sized
	 * for ABI register-save areas — TODO confirm against the ABI docs.
	 */
	int abigap[56];
};
207
208
209
210
211
212
213
214
215
216
217
218
/*
 * Layout of the frame placed on the user stack for an RT signal.
 * On PPC64 the compat siginfo layout is used.  With TM configured, a
 * second ucontext holds the transactional register state.
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
	compat_siginfo_t info;
#else
	struct siginfo info;
#endif
	struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext uc_transact;
#endif
	/*
	 * Gap below the frame for the handler's callees; presumably sized
	 * for ABI register-save areas — TODO confirm against the ABI docs.
	 */
	int abigap[56];
};
235
236
237
238
239
240
/*
 * Flush any live FP/VMX/VSX/SPE register state out of the CPU into
 * current's thread_struct so it can subsequently be copied to the signal
 * frame.  VSX is only flushed when the destination context has room for
 * it (ctx_has_vsx_region).
 */
static void prepare_save_user_regs(int ctx_has_vsx_region)
{
	/* Make sure floating point registers are stored in regs */
	flush_fp_to_thread(current);
#ifdef CONFIG_ALTIVEC
	if (current->thread.used_vr)
		flush_altivec_to_thread(current);
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.vrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_VSX
	if (current->thread.used_vsr && ctx_has_vsx_region)
		flush_vsx_to_thread(current);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
260
261static int __unsafe_save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
262 struct mcontext __user *tm_frame, int ctx_has_vsx_region)
263{
264 unsigned long msr = regs->msr;
265
266
267 unsafe_save_general_regs(regs, frame, failed);
268
269#ifdef CONFIG_ALTIVEC
270
271 if (current->thread.used_vr) {
272 unsafe_copy_to_user(&frame->mc_vregs, ¤t->thread.vr_state,
273 ELF_NVRREG * sizeof(vector128), failed);
274
275
276 msr |= MSR_VEC;
277 }
278
279
280
281
282
283
284
285
286 unsafe_put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32],
287 failed);
288#endif
289 unsafe_copy_fpr_to_user(&frame->mc_fregs, current, failed);
290
291
292
293
294
295 msr &= ~MSR_VSX;
296#ifdef CONFIG_VSX
297
298
299
300
301
302
303 if (current->thread.used_vsr && ctx_has_vsx_region) {
304 unsafe_copy_vsx_to_user(&frame->mc_vsregs, current, failed);
305 msr |= MSR_VSX;
306 }
307#endif
308#ifdef CONFIG_SPE
309
310 if (current->thread.used_spe) {
311 unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
312 ELF_NEVRREG * sizeof(u32), failed);
313
314
315 msr |= MSR_SPE;
316 }
317
318
319
320 unsafe_put_user(current->thread.spefscr,
321 (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
322#endif
323
324 unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
325
326
327
328
329 if (tm_frame)
330 unsafe_put_user(0, &tm_frame->mc_gregs[PT_MSR], failed);
331
332 return 0;
333
334failed:
335 return 1;
336}
337
/* goto-label wrapper around __unsafe_save_user_regs() */
#define unsafe_save_user_regs(regs, frame, tm_frame, has_vsx, label) do { \
	if (__unsafe_save_user_regs(regs, frame, tm_frame, has_vsx)) \
		goto label; \
} while (0)
342
343#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
344
345
346
347
348
349
350
351
352
/*
 * Flush register state ahead of saving a transactional signal frame.
 * Unlike prepare_save_user_regs(), FP/VMX/VSX are not flushed here —
 * presumably the TM reclaim path has already stored the checkpointed
 * state (see the TM save path); only VRSAVE and SPE need capturing.
 */
static void prepare_save_tm_user_regs(void)
{
	WARN_ON(tm_suspend_disabled);

#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
#endif
#ifdef CONFIG_SPE
	if (current->thread.used_spe)
		flush_spe_to_thread(current);
#endif
}
366
367static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
368 struct mcontext __user *tm_frame, unsigned long msr)
369{
370
371 unsafe_save_general_regs(¤t->thread.ckpt_regs, frame, failed);
372 unsafe_save_general_regs(regs, tm_frame, failed);
373
374
375
376
377
378
379
380 unsafe_put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR], failed);
381
382#ifdef CONFIG_ALTIVEC
383
384 if (current->thread.used_vr) {
385 unsafe_copy_to_user(&frame->mc_vregs, ¤t->thread.ckvr_state,
386 ELF_NVRREG * sizeof(vector128), failed);
387 if (msr & MSR_VEC)
388 unsafe_copy_to_user(&tm_frame->mc_vregs,
389 ¤t->thread.vr_state,
390 ELF_NVRREG * sizeof(vector128), failed);
391 else
392 unsafe_copy_to_user(&tm_frame->mc_vregs,
393 ¤t->thread.ckvr_state,
394 ELF_NVRREG * sizeof(vector128), failed);
395
396
397
398
399 msr |= MSR_VEC;
400 }
401
402
403
404
405
406
407 unsafe_put_user(current->thread.ckvrsave,
408 (u32 __user *)&frame->mc_vregs[32], failed);
409 if (msr & MSR_VEC)
410 unsafe_put_user(current->thread.vrsave,
411 (u32 __user *)&tm_frame->mc_vregs[32], failed);
412 else
413 unsafe_put_user(current->thread.ckvrsave,
414 (u32 __user *)&tm_frame->mc_vregs[32], failed);
415#endif
416
417 unsafe_copy_ckfpr_to_user(&frame->mc_fregs, current, failed);
418 if (msr & MSR_FP)
419 unsafe_copy_fpr_to_user(&tm_frame->mc_fregs, current, failed);
420 else
421 unsafe_copy_ckfpr_to_user(&tm_frame->mc_fregs, current, failed);
422
423#ifdef CONFIG_VSX
424
425
426
427
428
429
430 if (current->thread.used_vsr) {
431 unsafe_copy_ckvsx_to_user(&frame->mc_vsregs, current, failed);
432 if (msr & MSR_VSX)
433 unsafe_copy_vsx_to_user(&tm_frame->mc_vsregs, current, failed);
434 else
435 unsafe_copy_ckvsx_to_user(&tm_frame->mc_vsregs, current, failed);
436
437 msr |= MSR_VSX;
438 }
439#endif
440#ifdef CONFIG_SPE
441
442
443
444 if (current->thread.used_spe) {
445 unsafe_copy_to_user(&frame->mc_vregs, current->thread.evr,
446 ELF_NEVRREG * sizeof(u32), failed);
447
448
449 msr |= MSR_SPE;
450 }
451
452
453 unsafe_put_user(current->thread.spefscr,
454 (u32 __user *)&frame->mc_vregs + ELF_NEVRREG, failed);
455#endif
456
457 unsafe_put_user(msr, &frame->mc_gregs[PT_MSR], failed);
458
459 return 0;
460
461failed:
462 return 1;
463}
464#else
/* Stubs used when the kernel is built without transactional memory. */
static void prepare_save_tm_user_regs(void) { }

static int save_tm_user_regs_unsafe(struct pt_regs *regs, struct mcontext __user *frame,
				    struct mcontext __user *tm_frame, unsigned long msr)
{
	return 0;
}
472#endif
473
/* goto-label wrapper around save_tm_user_regs_unsafe() */
#define unsafe_save_tm_user_regs(regs, frame, tm_frame, msr, label) do { \
	if (save_tm_user_regs_unsafe(regs, frame, tm_frame, msr)) \
		goto label; \
} while (0)
478
479
480
481
482
483static long restore_user_regs(struct pt_regs *regs,
484 struct mcontext __user *sr, int sig)
485{
486 unsigned int save_r2 = 0;
487 unsigned long msr;
488#ifdef CONFIG_VSX
489 int i;
490#endif
491
492 if (!user_read_access_begin(sr, sizeof(*sr)))
493 return 1;
494
495
496
497
498 if (!sig)
499 save_r2 = (unsigned int)regs->gpr[2];
500 unsafe_restore_general_regs(regs, sr, failed);
501 set_trap_norestart(regs);
502 unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
503 if (!sig)
504 regs->gpr[2] = (unsigned long) save_r2;
505
506
507 if (sig)
508 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
509
510#ifdef CONFIG_ALTIVEC
511
512
513
514
515 regs->msr &= ~MSR_VEC;
516 if (msr & MSR_VEC) {
517
518 unsafe_copy_from_user(¤t->thread.vr_state, &sr->mc_vregs,
519 sizeof(sr->mc_vregs), failed);
520 current->thread.used_vr = true;
521 } else if (current->thread.used_vr)
522 memset(¤t->thread.vr_state, 0,
523 ELF_NVRREG * sizeof(vector128));
524
525
526 unsafe_get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32], failed);
527 if (cpu_has_feature(CPU_FTR_ALTIVEC))
528 mtspr(SPRN_VRSAVE, current->thread.vrsave);
529#endif
530 unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
531
532#ifdef CONFIG_VSX
533
534
535
536
537 regs->msr &= ~MSR_VSX;
538 if (msr & MSR_VSX) {
539
540
541
542
543 unsafe_copy_vsx_from_user(current, &sr->mc_vsregs, failed);
544 current->thread.used_vsr = true;
545 } else if (current->thread.used_vsr)
546 for (i = 0; i < 32 ; i++)
547 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
548#endif
549
550
551
552
553 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
554
555#ifdef CONFIG_SPE
556
557
558 regs->msr &= ~MSR_SPE;
559 if (msr & MSR_SPE) {
560
561 unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
562 ELF_NEVRREG * sizeof(u32), failed);
563 current->thread.used_spe = true;
564 } else if (current->thread.used_spe)
565 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
566
567
568 unsafe_get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
569#endif
570
571 user_read_access_end();
572 return 0;
573
574failed:
575 user_read_access_end();
576 return 1;
577}
578
579#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
580
581
582
583
584
585static long restore_tm_user_regs(struct pt_regs *regs,
586 struct mcontext __user *sr,
587 struct mcontext __user *tm_sr)
588{
589 unsigned long msr, msr_hi;
590#ifdef CONFIG_VSX
591 int i;
592#endif
593
594 if (tm_suspend_disabled)
595 return 1;
596
597
598
599
600
601
602
603 if (!user_read_access_begin(sr, sizeof(*sr)))
604 return 1;
605
606 unsafe_restore_general_regs(¤t->thread.ckpt_regs, sr, failed);
607 unsafe_get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP], failed);
608 unsafe_get_user(msr, &sr->mc_gregs[PT_MSR], failed);
609
610
611 regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
612
613#ifdef CONFIG_ALTIVEC
614 regs->msr &= ~MSR_VEC;
615 if (msr & MSR_VEC) {
616
617 unsafe_copy_from_user(¤t->thread.ckvr_state, &sr->mc_vregs,
618 sizeof(sr->mc_vregs), failed);
619 current->thread.used_vr = true;
620 } else if (current->thread.used_vr) {
621 memset(¤t->thread.vr_state, 0,
622 ELF_NVRREG * sizeof(vector128));
623 memset(¤t->thread.ckvr_state, 0,
624 ELF_NVRREG * sizeof(vector128));
625 }
626
627
628 unsafe_get_user(current->thread.ckvrsave,
629 (u32 __user *)&sr->mc_vregs[32], failed);
630 if (cpu_has_feature(CPU_FTR_ALTIVEC))
631 mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
632#endif
633
634 regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);
635
636 unsafe_copy_fpr_from_user(current, &sr->mc_fregs, failed);
637
638#ifdef CONFIG_VSX
639 regs->msr &= ~MSR_VSX;
640 if (msr & MSR_VSX) {
641
642
643
644
645 unsafe_copy_ckvsx_from_user(current, &sr->mc_vsregs, failed);
646 current->thread.used_vsr = true;
647 } else if (current->thread.used_vsr)
648 for (i = 0; i < 32 ; i++) {
649 current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
650 current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
651 }
652#endif
653
654#ifdef CONFIG_SPE
655
656
657
658 regs->msr &= ~MSR_SPE;
659 if (msr & MSR_SPE) {
660 unsafe_copy_from_user(current->thread.evr, &sr->mc_vregs,
661 ELF_NEVRREG * sizeof(u32), failed);
662 current->thread.used_spe = true;
663 } else if (current->thread.used_spe)
664 memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));
665
666
667 unsafe_get_user(current->thread.spefscr,
668 (u32 __user *)&sr->mc_vregs + ELF_NEVRREG, failed);
669#endif
670
671 user_read_access_end();
672
673 if (!user_read_access_begin(tm_sr, sizeof(*tm_sr)))
674 return 1;
675
676 unsafe_restore_general_regs(regs, tm_sr, failed);
677
678#ifdef CONFIG_ALTIVEC
679
680 if (msr & MSR_VEC)
681 unsafe_copy_from_user(¤t->thread.vr_state, &tm_sr->mc_vregs,
682 sizeof(sr->mc_vregs), failed);
683
684
685 unsafe_get_user(current->thread.vrsave,
686 (u32 __user *)&tm_sr->mc_vregs[32], failed);
687#endif
688
689 unsafe_copy_ckfpr_from_user(current, &tm_sr->mc_fregs, failed);
690
691#ifdef CONFIG_VSX
692 if (msr & MSR_VSX) {
693
694
695
696
697 unsafe_copy_vsx_from_user(current, &tm_sr->mc_vsregs, failed);
698 current->thread.used_vsr = true;
699 }
700#endif
701
702
703 unsafe_get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR], failed);
704 msr_hi <<= 32;
705
706 user_read_access_end();
707
708
709 if (MSR_TM_RESV(msr_hi))
710 return 1;
711
712
713
714
715
716 preempt_disable();
717
718
719
720
721
722
723
724
725
726
727
728 regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
729
730
731
732
733 tm_enable();
734
735 current->thread.tm_texasr |= TEXASR_FS;
736
737 tm_recheckpoint(¤t->thread);
738
739
740 msr_check_and_set(msr & (MSR_FP | MSR_VEC));
741 if (msr & MSR_FP) {
742 load_fp_state(¤t->thread.fp_state);
743 regs->msr |= (MSR_FP | current->thread.fpexc_mode);
744 }
745#ifdef CONFIG_ALTIVEC
746 if (msr & MSR_VEC) {
747 load_vr_state(¤t->thread.vr_state);
748 regs->msr |= MSR_VEC;
749 }
750#endif
751
752 preempt_enable();
753
754 return 0;
755
756failed:
757 user_read_access_end();
758 return 1;
759}
760#else
/* Stub used when the kernel is built without transactional memory. */
static long restore_tm_user_regs(struct pt_regs *regs, struct mcontext __user *sr,
				 struct mcontext __user *tm_sr)
{
	return 0;
}
766#endif
767
#ifdef CONFIG_PPC64
/* Compat path writes the 32-bit siginfo layout. */
#define copy_siginfo_to_user copy_siginfo_to_user32

#endif /* CONFIG_PPC64 */
773
774
775
776
777
/*
 * Set a 32-bit task up to return to a realtime (SA_SIGINFO) signal
 * handler: build the rt_sigframe on the user stack, save the register
 * state (checkpointed + transactional when TM is active), and point the
 * task's registers at the handler.  Returns 0 on success, 1 on failure
 * (the caller is expected to kill the task).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
		       struct task_struct *tsk)
{
	struct rt_sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before it is modified below */
	unsigned long msr = regs->msr;

	/* Set up the signal frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->uc.uc_mcontext;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->uc_transact.uc_mcontext;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;

	/* Fill in most of the ucontext */
	unsafe_put_user(0, &frame->uc.uc_flags, failed);
#ifdef CONFIG_PPC64
	unsafe_compat_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#else
	unsafe_save_altstack(&frame->uc.uc_stack, regs->gpr[1], failed);
#endif
	unsafe_put_user(to_user_ptr(&frame->uc.uc_mcontext), &frame->uc.uc_regs, failed);

	if (MSR_TM_ACTIVE(msr)) {
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		/* uc_link points at the transactional ucontext */
		unsafe_put_user((unsigned long)&frame->uc_transact,
				&frame->uc.uc_link, failed);
		unsafe_put_user((unsigned long)tm_mctx,
				&frame->uc_transact.uc_regs, failed);
#endif
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	} else {
		unsafe_put_user(0, &frame->uc.uc_link, failed);
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);
	}

	/* Trampoline: use the vDSO if mapped, else write one on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp_rt32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		/* "li r0, __NR_rt_sigreturn; sc" */
		unsafe_put_user(PPC_INST_ADDI + __NR_rt_sigreturn, &mctx->mc_pad[0],
				failed);
		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
		/* flush the trampoline to memory and invalidate the icache */
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	unsafe_put_sigset_t(&frame->uc.uc_sigmask, oldset, failed);

	user_access_end();

	if (copy_siginfo_to_user(&frame->info, &ksig->info))
		goto badframe;

	/* the handler returns through the trampoline */
	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - (__SIGNAL_FRAMESIZE + 16);
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	/* Fill registers for the signal handler */
	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long)&frame->info;
	regs->gpr[5] = (unsigned long)&frame->uc;
	regs->gpr[6] = (unsigned long)frame;
	regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
	/* enter the handler in the kernel's endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_rt_signal32", frame);

	return 1;
}
875
876
877
878
/*
 * Set a 32-bit task up to return to a classic (non-SA_SIGINFO) signal
 * handler: build the sigframe on the user stack, save the register state,
 * and point the task's registers at the handler.  Returns 0 on success,
 * 1 on failure (the caller is expected to kill the task).
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
		    struct task_struct *tsk)
{
	struct sigcontext __user *sc;
	struct sigframe __user *frame;
	struct mcontext __user *mctx;
	struct mcontext __user *tm_mctx = NULL;
	unsigned long newsp = 0;
	unsigned long tramp;
	struct pt_regs *regs = tsk->thread.regs;
	/* Save the thread's msr before it is modified below */
	unsigned long msr = regs->msr;

	/* Set up the signal frame */
	frame = get_sigframe(ksig, tsk, sizeof(*frame), 1);
	mctx = &frame->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	tm_mctx = &frame->mctx_transact;
#endif
	if (MSR_TM_ACTIVE(msr))
		prepare_save_tm_user_regs();
	else
		prepare_save_user_regs(1);

	if (!user_access_begin(frame, sizeof(*frame)))
		goto badframe;
	sc = (struct sigcontext __user *) &frame->sctx;

	/* the old-mask split below assumes a 64-bit sigset */
#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
	unsafe_put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler, failed);
	unsafe_put_user(oldset->sig[0], &sc->oldmask, failed);
#ifdef CONFIG_PPC64
	unsafe_put_user((oldset->sig[0] >> 32), &sc->_unused[3], failed);
#else
	unsafe_put_user(oldset->sig[1], &sc->_unused[3], failed);
#endif
	unsafe_put_user(to_user_ptr(mctx), &sc->regs, failed);
	unsafe_put_user(ksig->sig, &sc->signal, failed);

	if (MSR_TM_ACTIVE(msr))
		unsafe_save_tm_user_regs(regs, mctx, tm_mctx, msr, failed);
	else
		unsafe_save_user_regs(regs, mctx, tm_mctx, 1, failed);

	/* Trampoline: use the vDSO if mapped, else write one on the stack */
	if (tsk->mm->context.vdso) {
		tramp = VDSO32_SYMBOL(tsk->mm->context.vdso, sigtramp32);
	} else {
		tramp = (unsigned long)mctx->mc_pad;
		/* "li r0, __NR_sigreturn; sc" */
		unsafe_put_user(PPC_INST_ADDI + __NR_sigreturn, &mctx->mc_pad[0], failed);
		unsafe_put_user(PPC_INST_SC, &mctx->mc_pad[1], failed);
		/* flush the trampoline to memory and invalidate the icache */
		asm("dcbst %y0; sync; icbi %y0; sync" :: "Z" (mctx->mc_pad[0]));
	}
	user_access_end();

	/* the handler returns through the trampoline */
	regs->link = tramp;

#ifdef CONFIG_PPC_FPU_REGS
	tsk->thread.fp_state.fpscr = 0;	/* turn off all fp exceptions */
#endif

	/* create a stack frame for the caller of the handler */
	newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
	if (put_user(regs->gpr[1], (u32 __user *)newsp))
		goto badframe;

	regs->gpr[1] = newsp;
	regs->gpr[3] = ksig->sig;
	regs->gpr[4] = (unsigned long) sc;
	regs->nip = (unsigned long)ksig->ka.sa.sa_handler;
	/* enter the handler in the kernel's endian mode */
	regs->msr &= ~MSR_LE;
	regs->msr |= (MSR_KERNEL & MSR_LE);
	return 0;

failed:
	user_access_end();

badframe:
	signal_fault(tsk, regs, "handle_signal32", frame);

	return 1;
}
964
/*
 * Install the sigmask and register state from a user ucontext.
 * @sig is forwarded to restore_user_regs() (nonzero for signal return).
 * Returns 0 on success, -EFAULT on any faulting user access.
 */
static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
	sigset_t set;
	struct mcontext __user *mcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
#ifdef CONFIG_PPC64
	{
		/* uc_regs holds a 32-bit user pointer; widen it */
		u32 cmcp;

		unsafe_get_user(cmcp, &ucp->uc_regs, failed);
		mcp = (struct mcontext __user *)(u64)cmcp;
	}
#else
	unsafe_get_user(mcp, &ucp->uc_regs, failed);
#endif
	user_read_access_end();

	set_current_blocked(&set);
	if (restore_user_regs(regs, mcp, sig))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
996
997#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Install the sigmask and transactional register state from a pair of
 * user ucontexts (checkpointed @ucp + transactional @tm_ucp).
 * Returns 0 on success, -EFAULT on any faulting user access.
 */
static int do_setcontext_tm(struct ucontext __user *ucp,
			    struct ucontext __user *tm_ucp,
			    struct pt_regs *regs)
{
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp;
	u32 cmcp;
	u32 tm_cmcp;

	if (!user_read_access_begin(ucp, sizeof(*ucp)))
		return -EFAULT;

	unsafe_get_sigset_t(&set, &ucp->uc_sigmask, failed);
	unsafe_get_user(cmcp, &ucp->uc_regs, failed);

	user_read_access_end();

	if (__get_user(tm_cmcp, &tm_ucp->uc_regs))
		return -EFAULT;
	/* uc_regs hold 32-bit user pointers; widen them */
	mcp = (struct mcontext __user *)(u64)cmcp;
	tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;

	set_current_blocked(&set);
	if (restore_tm_user_regs(regs, mcp, tm_mcp))
		return -EFAULT;

	return 0;

failed:
	user_read_access_end();
	return -EFAULT;
}
1032#endif
1033
1034#ifdef CONFIG_PPC64
1035COMPAT_SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1036 struct ucontext __user *, new_ctx, int, ctx_size)
1037#else
1038SYSCALL_DEFINE3(swapcontext, struct ucontext __user *, old_ctx,
1039 struct ucontext __user *, new_ctx, long, ctx_size)
1040#endif
1041{
1042 struct pt_regs *regs = current_pt_regs();
1043 int ctx_has_vsx_region = 0;
1044
1045#ifdef CONFIG_PPC64
1046 unsigned long new_msr = 0;
1047
1048 if (new_ctx) {
1049 struct mcontext __user *mcp;
1050 u32 cmcp;
1051
1052
1053
1054
1055
1056
1057 if (__get_user(cmcp, &new_ctx->uc_regs))
1058 return -EFAULT;
1059 mcp = (struct mcontext __user *)(u64)cmcp;
1060 if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
1061 return -EFAULT;
1062 }
1063
1064
1065
1066
1067 if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
1068 return -EINVAL;
1069
1070
1071
1072
1073 if ((ctx_size < sizeof(struct ucontext)) &&
1074 (new_msr & MSR_VSX))
1075 return -EINVAL;
1076
1077 if (ctx_size >= sizeof(struct ucontext))
1078 ctx_has_vsx_region = 1;
1079#else
1080
1081
1082
1083 if (ctx_size < sizeof(struct ucontext))
1084 return -EINVAL;
1085#endif
1086 if (old_ctx != NULL) {
1087 struct mcontext __user *mctx;
1088
1089
1090
1091
1092
1093
1094
1095
1096 mctx = (struct mcontext __user *)
1097 ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
1098 prepare_save_user_regs(ctx_has_vsx_region);
1099 if (!user_write_access_begin(old_ctx, ctx_size))
1100 return -EFAULT;
1101 unsafe_save_user_regs(regs, mctx, NULL, ctx_has_vsx_region, failed);
1102 unsafe_put_sigset_t(&old_ctx->uc_sigmask, ¤t->blocked, failed);
1103 unsafe_put_user(to_user_ptr(mctx), &old_ctx->uc_regs, failed);
1104 user_write_access_end();
1105 }
1106 if (new_ctx == NULL)
1107 return 0;
1108 if (!access_ok(new_ctx, ctx_size) ||
1109 fault_in_pages_readable((u8 __user *)new_ctx, ctx_size))
1110 return -EFAULT;
1111
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121
1122
1123 if (do_setcontext(new_ctx, regs, 0))
1124 do_exit(SIGSEGV);
1125
1126 set_thread_flag(TIF_RESTOREALL);
1127 return 0;
1128
1129failed:
1130 user_write_access_end();
1131 return -EFAULT;
1132}
1133
1134#ifdef CONFIG_PPC64
/*
 * rt_sigreturn syscall (32-bit / compat): tear down the rt_sigframe built
 * by handle_rt_signal32() and restore the pre-signal register state,
 * including the transactional state when the frame carries one.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(rt_sigreturn)
#else
SYSCALL_DEFINE0(rt_sigreturn)
#endif
{
	struct rt_sigframe __user *rt_sf;
	struct pt_regs *regs = current_pt_regs();
	int tm_restore = 0;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	struct ucontext __user *uc_transact;
	unsigned long msr_hi;
	unsigned long tmp;
#endif
	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	rt_sf = (struct rt_sigframe __user *)
		(regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
	if (!access_ok(rt_sf, sizeof(*rt_sf)))
		goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/*
	 * Reclaim any suspended transaction before touching the user
	 * context; the checkpointed state is rebuilt from the frame below.
	 */
	if (MSR_TM_SUSPENDED(mfmsr()))
		tm_reclaim_current(0);

	if (__get_user(tmp, &rt_sf->uc.uc_link))
		goto bad;
	uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
	if (uc_transact) {
		u32 cmcp;
		struct mcontext __user *mcp;

		if (__get_user(cmcp, &uc_transact->uc_regs))
			return -EFAULT;
		mcp = (struct mcontext __user *)(u64)cmcp;
		/* The top 32 bits of the MSR are stashed in the tm frame */
		if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
			goto bad;

		if (MSR_TM_ACTIVE(msr_hi<<32)) {
			/* Trying to start TM on a non-TM system */
			if (!cpu_has_feature(CPU_FTR_TM))
				goto bad;

			/* Only recheckpoint when the frame is transactional */
			tm_restore = 1;
			if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
				goto bad;
		}
	}
	if (!tm_restore) {
		/*
		 * Not restoring a transaction: make sure no stale TS bits
		 * survive in the MSR.
		 */
		regs->msr &= ~MSR_TS_MASK;
	}
	/* Fall through for the non-transactional restore */
#endif
	if (!tm_restore)
		if (do_setcontext(&rt_sf->uc, regs, 1))
			goto bad;

	/* Restore the sigaltstack configuration saved at delivery time. */
#ifdef CONFIG_PPC64
	if (compat_restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#else
	if (restore_altstack(&rt_sf->uc.uc_stack))
		goto bad;
#endif
	set_thread_flag(TIF_RESTOREALL);
	return 0;

 bad:
	signal_fault(current, regs, "sys_rt_sigreturn", rt_sf);

	force_sig(SIGSEGV);
	return 0;
}
1234
1235#ifdef CONFIG_PPC32
/*
 * debug_setcontext syscall (32-bit only): apply a list of debug operations
 * (single-step / branch-trace enables) and then install the given
 * ucontext, as do_setcontext() would for a signal return.
 */
SYSCALL_DEFINE3(debug_setcontext, struct ucontext __user *, ctx,
		int, ndbg, struct sig_dbg_op __user *, dbg)
{
	struct pt_regs *regs = current_pt_regs();
	struct sig_dbg_op op;
	int i;
	unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

	/* Validate and accumulate all requested debug changes first. */
	for (i=0; i<ndbg; i++) {
		if (copy_from_user(&op, dbg + i, sizeof(op)))
			return -EFAULT;
		switch (op.dbg_type) {
		case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			if (op.dbg_value) {
				new_msr |= MSR_DE;
				new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
			} else {
				new_dbcr0 &= ~DBCR0_IC;
				/* only drop MSR_DE if nothing else is active */
				if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
						current->thread.debug.dbcr1)) {
					new_msr &= ~MSR_DE;
					new_dbcr0 &= ~DBCR0_IDM;
				}
			}
#else
			if (op.dbg_value)
				new_msr |= MSR_SE;
			else
				new_msr &= ~MSR_SE;
#endif
			break;
		case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
			return -EINVAL;
#else
			if (op.dbg_value)
				new_msr |= MSR_BE;
			else
				new_msr &= ~MSR_BE;
#endif
			break;

		default:
			return -EINVAL;
		}
	}

	/*
	 * All ops were valid: commit the new MSR/DBCR0 before installing
	 * the context, so the restored context runs with them in force.
	 */
	regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
	current->thread.debug.dbcr0 = new_dbcr0;
#endif

	if (!access_ok(ctx, sizeof(*ctx)) ||
	    fault_in_pages_readable((u8 __user *)ctx, sizeof(*ctx)))
		return -EFAULT;

	/*
	 * If do_setcontext() faults part-way through, the registers may be
	 * corrupted, so the process is killed rather than returning -EFAULT
	 * (the context pages were faulted in above to make this unlikely).
	 */
	if (do_setcontext(ctx, regs, 1)) {
		signal_fault(current, regs, "sys_debug_setcontext", ctx);

		force_sig(SIGSEGV);
		goto out;
	}

	/*
	 * Restore the sigaltstack configuration from the context.
	 * NOTE(review): the return value is deliberately(?) ignored here —
	 * unlike the sigreturn paths above.
	 */
	restore_altstack(&ctx->uc_stack);

	set_thread_flag(TIF_RESTOREALL);
 out:
	return 0;
}
1332#endif
1333
1334
1335
1336
1337#ifdef CONFIG_PPC64
/*
 * sigreturn syscall (32-bit / compat): tear down the sigframe built by
 * handle_signal32() and restore the pre-signal register state, including
 * the transactional state when the frame carries one.
 */
#ifdef CONFIG_PPC64
COMPAT_SYSCALL_DEFINE0(sigreturn)
#else
SYSCALL_DEFINE0(sigreturn)
#endif
{
	struct pt_regs *regs = current_pt_regs();
	struct sigframe __user *sf;
	struct sigcontext __user *sc;
	struct sigcontext sigctx;
	struct mcontext __user *sr;
	sigset_t set;
	struct mcontext __user *mcp;
	struct mcontext __user *tm_mcp = NULL;
	unsigned long long msr_hi = 0;

	/* Always make any pending restarted system calls return -EINTR */
	current->restart_block.fn = do_no_restart_syscall;

	sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
	sc = &sf->sctx;
	if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
		goto badframe;

#ifdef CONFIG_PPC64
	/*
	 * Reassemble the 64-bit sigmask from the two halves written by
	 * handle_signal32() (low word in oldmask, high word in _unused[3]).
	 */
	set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
	set.sig[0] = sigctx.oldmask;
	set.sig[1] = sigctx._unused[3];
#endif
	set_current_blocked(&set);

	mcp = (struct mcontext __user *)&sf->mctx;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	/* The top 32 bits of the MSR are stashed in the tm frame */
	tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
	if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
		goto badframe;
#endif
	if (MSR_TM_ACTIVE(msr_hi<<32)) {
		/* Trying to start TM on a non-TM system */
		if (!cpu_has_feature(CPU_FTR_TM))
			goto badframe;
		if (restore_tm_user_regs(regs, mcp, tm_mcp))
			goto badframe;
	} else {
		sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
		if (restore_user_regs(regs, sr, 1)) {
			signal_fault(current, regs, "sys_sigreturn", sr);

			force_sig(SIGSEGV);
			return 0;
		}
	}

	set_thread_flag(TIF_RESTOREALL);
	return 0;

badframe:
	signal_fault(current, regs, "sys_sigreturn", sc);

	force_sig(SIGSEGV);
	return 0;
}
1403