linux/arch/powerpc/kernel/signal_32.c
/*
 * Signal handling for 32bit PPC and 32bit tasks on 64bit PPC
 *
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 * Copyright (C) 2001 IBM
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1997 David S. Miller (davem@caip.rutgers.edu)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>
#ifdef CONFIG_PPC64
#include <linux/syscalls.h>
#include <linux/compat.h>
#else
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#endif

#include <linux/uaccess.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/sigcontext.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>
#include <asm/asm-prototypes.h>
#ifdef CONFIG_PPC64
#include "ppc32.h"
#include <asm/unistd.h>
#else
#include <asm/ucontext.h>
#include <asm/pgtable.h>
#endif

#include "signal.h"


#ifdef CONFIG_PPC64
#define sys_rt_sigreturn        compat_sys_rt_sigreturn
#define sys_swapcontext compat_sys_swapcontext
#define sys_sigreturn   compat_sys_sigreturn

#define old_sigaction   old_sigaction32
#define sigcontext      sigcontext32
#define mcontext        mcontext32
#define ucontext        ucontext32

#define __save_altstack __compat_save_altstack

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
                (sizeof(struct ucontext) - sizeof(elf_vsrreghalf_t32))
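
/*
 * For illustration: the compat ucontext can arrive in two sizes.
 * sys_swapcontext() below rejects anything smaller than
 * UCONTEXTSIZEWITHOUTVSX, and only treats the trailing
 * elf_vsrreghalf_t32 region as present when the caller passed the
 * full sizeof(struct ucontext), roughly:
 *
 *      if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
 *              return -EINVAL;
 *      ctx_has_vsx_region = (ctx_size >= sizeof(struct ucontext));
 */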

/*
 * Returning 0 means we return to userspace via
 * ret_from_except and thus restore all user
 * registers from *regs.  This is what we need
 * to do when a signal has been delivered.
 */

#define GP_REGS_SIZE    min(sizeof(elf_gregset_t32), sizeof(struct pt_regs32))
#undef __SIGNAL_FRAMESIZE
#define __SIGNAL_FRAMESIZE      __SIGNAL_FRAMESIZE32
#undef ELF_NVRREG
#define ELF_NVRREG      ELF_NVRREG32

/*
 * Functions for flipping sigsets (thanks to brain dead generic
 * implementation that makes things simple for little endian only)
 */
static inline int put_sigset_t(compat_sigset_t __user *uset, sigset_t *set)
{
        compat_sigset_t cset;

        switch (_NSIG_WORDS) {
        case 4: cset.sig[6] = set->sig[3] & 0xffffffffull;
                cset.sig[7] = set->sig[3] >> 32;
        case 3: cset.sig[4] = set->sig[2] & 0xffffffffull;
                cset.sig[5] = set->sig[2] >> 32;
        case 2: cset.sig[2] = set->sig[1] & 0xffffffffull;
                cset.sig[3] = set->sig[1] >> 32;
        case 1: cset.sig[0] = set->sig[0] & 0xffffffffull;
                cset.sig[1] = set->sig[0] >> 32;
        }
        return copy_to_user(uset, &cset, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set,
                               const compat_sigset_t __user *uset)
{
        compat_sigset_t s32;

        if (copy_from_user(&s32, uset, sizeof(*uset)))
                return -EFAULT;

        /*
         * Swap the 2 words of the 64-bit sigset_t (they are stored
         * in the "wrong" endian in 32-bit user storage).
         */
        switch (_NSIG_WORDS) {
        case 4: set->sig[3] = s32.sig[6] | (((long)s32.sig[7]) << 32);
        case 3: set->sig[2] = s32.sig[4] | (((long)s32.sig[5]) << 32);
        case 2: set->sig[1] = s32.sig[2] | (((long)s32.sig[3]) << 32);
        case 1: set->sig[0] = s32.sig[0] | (((long)s32.sig[1]) << 32);
        }
        return 0;
}
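
/*
 * Worked example of the word flip above: a 64-bit sigset_t with
 * SIGHUP (bit 0) and signal 34 (bit 33) blocked has
 * set->sig[0] == 0x0000000200000001.  In 32-bit user storage this
 * becomes cset.sig[0] == 0x00000001 (low word first) and
 * cset.sig[1] == 0x00000002, and get_sigset_t() reassembles the same
 * 64-bit value from those two words.
 */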

#define to_user_ptr(p)          ptr_to_compat(p)
#define from_user_ptr(p)        compat_ptr(p)

static inline int save_general_regs(struct pt_regs *regs,
                struct mcontext __user *frame)
{
        elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
        int i;

        WARN_ON(!FULL_REGS(regs));

        for (i = 0; i <= PT_RESULT; i++) {
                if (i == 14 && !FULL_REGS(regs))
                        i = 32;
                if (__put_user((unsigned int)gregs[i], &frame->mc_gregs[i]))
                        return -EFAULT;
        }
        return 0;
}

static inline int restore_general_regs(struct pt_regs *regs,
                struct mcontext __user *sr)
{
        elf_greg_t64 *gregs = (elf_greg_t64 *)regs;
        int i;

        for (i = 0; i <= PT_RESULT; i++) {
                if ((i == PT_MSR) || (i == PT_SOFTE))
                        continue;
                if (__get_user(gregs[i], &sr->mc_gregs[i]))
                        return -EFAULT;
        }
        return 0;
}

#else /* CONFIG_PPC64 */

#define GP_REGS_SIZE    min(sizeof(elf_gregset_t), sizeof(struct pt_regs))

static inline int put_sigset_t(sigset_t __user *uset, sigset_t *set)
{
        return copy_to_user(uset, set, sizeof(*uset));
}

static inline int get_sigset_t(sigset_t *set, const sigset_t __user *uset)
{
        return copy_from_user(set, uset, sizeof(*uset));
}

#define to_user_ptr(p)          ((unsigned long)(p))
#define from_user_ptr(p)        ((void __user *)(p))

static inline int save_general_regs(struct pt_regs *regs,
                struct mcontext __user *frame)
{
        WARN_ON(!FULL_REGS(regs));
        return __copy_to_user(&frame->mc_gregs, regs, GP_REGS_SIZE);
}

static inline int restore_general_regs(struct pt_regs *regs,
                struct mcontext __user *sr)
{
        /* copy up to but not including MSR */
        if (__copy_from_user(regs, &sr->mc_gregs,
                                PT_MSR * sizeof(elf_greg_t)))
                return -EFAULT;
        /* copy from orig_r3 (the word after the MSR) up to the end */
        if (__copy_from_user(&regs->orig_gpr3, &sr->mc_gregs[PT_ORIG_R3],
                                GP_REGS_SIZE - PT_ORIG_R3 * sizeof(elf_greg_t)))
                return -EFAULT;
        return 0;
}
#endif

/*
 * When we have signals to deliver, we set up on the
 * user stack, going down from the original stack pointer:
 *      an ABI gap of 56 words
 *      an mcontext struct
 *      a sigcontext struct
 *      a gap of __SIGNAL_FRAMESIZE bytes
 *
 * Each of these things must be a multiple of 16 bytes in size. The following
 * structure represents all of this except the __SIGNAL_FRAMESIZE gap
 *
 */
struct sigframe {
        struct sigcontext sctx;         /* the sigcontext */
        struct mcontext mctx;           /* all the register values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct sigcontext sctx_transact;
        struct mcontext mctx_transact;
#endif
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
         */
        int                     abigap[56];
};
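
/*
 * Illustrative picture of the frame this produces (addresses decrease
 * downward; treat it as a sketch, since the exact offsets depend on the
 * config and on get_sigframe()'s alignment):
 *
 *      old SP ->       interrupted caller's frame
 *                      abigap[56]
 *                      [mctx_transact / sctx_transact, if TM]
 *                      mctx
 *                      sctx            <- struct sigframe starts here
 *                      __SIGNAL_FRAMESIZE gap
 *      new SP ->       back-chain word pointing at old SP
 */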

/* We use the mc_pad field for the signal return trampoline. */
#define tramp   mc_pad

/*
 *  When we have rt signals to deliver, we set up on the
 *  user stack, going down from the original stack pointer:
 *      one rt_sigframe struct (siginfo + ucontext + ABI gap)
 *      a gap of __SIGNAL_FRAMESIZE+16 bytes
 *  (the +16 is to get the siginfo and ucontext in the same
 *  positions as in older kernels).
 *
 *  Each of these things must be a multiple of 16 bytes in size.
 *
 */
struct rt_sigframe {
#ifdef CONFIG_PPC64
        compat_siginfo_t info;
#else
        struct siginfo info;
#endif
        struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct ucontext uc_transact;
#endif
        /*
         * Programs using the rs6000/xcoff abi can save up to 19 gp
         * regs and 18 fp regs below sp before decrementing it.
         */
        int                     abigap[56];
};
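
/*
 * For illustration only (userspace view, not built with the kernel):
 * a 32-bit SA_SIGINFO handler receives pointers into the rt_sigframe
 * above -- r4 points at "info" and r5 at "uc".  The field names below
 * follow the powerpc32 glibc <sys/ucontext.h> layout and are an
 * assumption of this sketch, not something this file defines (and
 * printf is used only for brevity; it is not async-signal-safe):
 *
 *      #include <signal.h>
 *      #include <stdio.h>
 *      #include <ucontext.h>
 *      #include <asm/ptrace.h>
 *
 *      static void handler(int sig, siginfo_t *info, void *uctx)
 *      {
 *              ucontext_t *uc = uctx;
 *
 *              printf("sig %d addr %p nip 0x%08lx\n", sig,
 *                     info->si_addr,
 *                     (unsigned long)uc->uc_mcontext.uc_regs->gregs[PT_NIP]);
 *      }
 *
 *      int main(void)
 *      {
 *              struct sigaction sa = { .sa_sigaction = handler,
 *                                      .sa_flags = SA_SIGINFO };
 *              sigaction(SIGSEGV, &sa, NULL);
 *              *(volatile int *)0 = 0;
 *              return 0;
 *      }
 */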

#ifdef CONFIG_VSX
unsigned long copy_fpr_to_user(void __user *to,
                               struct task_struct *task)
{
        u64 buf[ELF_NFPREG];
        int i;

        /* copy the FPRs from the thread_struct to a local buffer,
         * then write that buffer out to userspace */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_FPR(i);
        buf[i] = task->thread.fp_state.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_fpr_from_user(struct task_struct *task,
                                 void __user *from)
{
        u64 buf[ELF_NFPREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_FPR(i) = buf[i];
        task->thread.fp_state.fpscr = buf[i];

        return 0;
}

unsigned long copy_vsx_to_user(void __user *to,
                               struct task_struct *task)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        /* copy the upper halves of VSR 0-31 from the thread_struct to a
         * local buffer, then write that buffer out to userspace */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
                buf[i] = task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_vsx_from_user(struct task_struct *task,
                                 void __user *from)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
                task->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
}
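
/*
 * Background for the VSX helpers above: each 128-bit VSR 0-31 shares
 * its first doubleword with the corresponding FPR (TS_FPROFFSET), so
 * that half already travels with the FP context.  Only the second
 * doubleword, fp_state.fpr[i][TS_VSRLOWOFFSET], has to be shipped
 * separately, which is why a VSX context is ELF_NVSRHALFREG u64s
 * rather than 32 full vectors.
 */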

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
unsigned long copy_ckfpr_to_user(void __user *to,
                                  struct task_struct *task)
{
        u64 buf[ELF_NFPREG];
        int i;

        /* copy the checkpointed FPRs from the thread_struct to a local
         * buffer, then write that buffer out to userspace */
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                buf[i] = task->thread.TS_CKFPR(i);
        buf[i] = task->thread.ckfp_state.fpscr;
        return __copy_to_user(to, buf, ELF_NFPREG * sizeof(double));
}

unsigned long copy_ckfpr_from_user(struct task_struct *task,
                                          void __user *from)
{
        u64 buf[ELF_NFPREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NFPREG * sizeof(double)))
                return 1;
        for (i = 0; i < (ELF_NFPREG - 1) ; i++)
                task->thread.TS_CKFPR(i) = buf[i];
        task->thread.ckfp_state.fpscr = buf[i];

        return 0;
}

unsigned long copy_ckvsx_to_user(void __user *to,
                                  struct task_struct *task)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        /* copy the checkpointed upper halves of VSR 0-31 from the
         * thread_struct to a local buffer, then write that to userspace */
        for (i = 0; i < ELF_NVSRHALFREG; i++)
                buf[i] = task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET];
        return __copy_to_user(to, buf, ELF_NVSRHALFREG * sizeof(double));
}

unsigned long copy_ckvsx_from_user(struct task_struct *task,
                                          void __user *from)
{
        u64 buf[ELF_NVSRHALFREG];
        int i;

        if (__copy_from_user(buf, from, ELF_NVSRHALFREG * sizeof(double)))
                return 1;
        for (i = 0; i < ELF_NVSRHALFREG ; i++)
                task->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = buf[i];
        return 0;
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#else
inline unsigned long copy_fpr_to_user(void __user *to,
                                      struct task_struct *task)
{
        return __copy_to_user(to, task->thread.fp_state.fpr,
                              ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_fpr_from_user(struct task_struct *task,
                                        void __user *from)
{
        return __copy_from_user(task->thread.fp_state.fpr, from,
                              ELF_NFPREG * sizeof(double));
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
inline unsigned long copy_ckfpr_to_user(void __user *to,
                                         struct task_struct *task)
{
        return __copy_to_user(to, task->thread.ckfp_state.fpr,
                              ELF_NFPREG * sizeof(double));
}

inline unsigned long copy_ckfpr_from_user(struct task_struct *task,
                                                 void __user *from)
{
        return __copy_from_user(task->thread.ckfp_state.fpr, from,
                                ELF_NFPREG * sizeof(double));
}
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
#endif

/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 */
static int save_user_regs(struct pt_regs *regs, struct mcontext __user *frame,
                          struct mcontext __user *tm_frame, int sigret,
                          int ctx_has_vsx_region)
{
        unsigned long msr = regs->msr;

        /* Make sure floating point registers are stored in regs */
        flush_fp_to_thread(current);

        /* save general registers */
        if (save_general_regs(regs, frame))
                return 1;

#ifdef CONFIG_ALTIVEC
        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, &current->thread.vr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                /* set MSR_VEC in the saved MSR value to indicate that
                   frame->mc_vregs contains valid data */
                msr |= MSR_VEC;
        }
        /* else assert((regs->msr & MSR_VEC) == 0) */

        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec. Since VSCR only contains 32 bits saved in the least
         * significant bits of a vector, we "cheat" and stuff VRSAVE in the
         * most significant bits of that same vector. --BenH
         * Note that the current VRSAVE value is in the SPR at this point.
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                current->thread.vrsave = mfspr(SPRN_VRSAVE);
        if (__put_user(current->thread.vrsave, (u32 __user *)&frame->mc_vregs[32]))
                return 1;
#endif /* CONFIG_ALTIVEC */
        if (copy_fpr_to_user(&frame->mc_fregs, current))
                return 1;

        /*
         * Clear the MSR VSX bit to indicate there is no valid state attached
         * to this context, except in the specific case below where we set it.
         */
        msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
        /*
         * Copy VSR 0-31 upper half from thread_struct to local
         * buffer, then write that to userspace.  Also set MSR_VSX in
         * the saved MSR value to indicate that frame->mc_vregs
         * contains valid data
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
                flush_vsx_to_thread(current);
                if (copy_vsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* save spe registers */
        if (current->thread.used_spe) {
                flush_spe_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
                                   ELF_NEVRREG * sizeof(u32)))
                        return 1;
                /* set MSR_SPE in the saved MSR value to indicate that
                   frame->mc_vregs contains valid data */
                msr |= MSR_SPE;
        }
        /* else assert((regs->msr & MSR_SPE) == 0) */

        /* We always copy to/from spefscr */
        if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
                return 1;
        /* We need to write 0 to the MSR top 32 bits in the tm frame so that
         * we can check it on restore to see whether TM was active
         */
        if (tm_frame && __put_user(0, &tm_frame->mc_gregs[PT_MSR]))
                return 1;
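
        /*
         * Trampoline encoding, for reference: 0x38000000 is
         * "addi r0,0,0" (li r0,0), so 0x38000000 + sigret yields
         * "li r0,<syscall number>", and 0x44000002 is "sc".  Together
         * they re-enter the kernel through the appropriate sigreturn
         * syscall when the handler returns.
         */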
        if (sigret) {
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
                    || __put_user(0x44000002UL, &frame->tramp[1]))
                        return 1;
                flush_icache_range((unsigned long) &frame->tramp[0],
                                   (unsigned long) &frame->tramp[2]);
        }

        return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save the current user registers on the user stack.
 * We only save the altivec/spe registers if the process has used
 * altivec/spe instructions at some point.
 * We also save the transactional registers to a second ucontext in the
 * frame.
 *
 * See save_user_regs() and signal_64.c:setup_tm_sigcontexts().
 */
static int save_tm_user_regs(struct pt_regs *regs,
                             struct mcontext __user *frame,
                             struct mcontext __user *tm_frame, int sigret)
{
        unsigned long msr = regs->msr;

        /* Remove TM bits from thread's MSR.  The MSR in the sigcontext
         * just indicates to userland that we were doing a transaction, but we
         * don't want to return in transactional state.  This also ensures
         * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
         */
        regs->msr &= ~MSR_TS_MASK;

        /* Save both sets of general registers */
        if (save_general_regs(&current->thread.ckpt_regs, frame)
            || save_general_regs(regs, tm_frame))
                return 1;

        /* Stash the top half of the 64bit MSR into the 32bit MSR word
         * of the transactional mcontext.  This way we have a backward-compatible
         * MSR in the 'normal' (checkpointed) mcontext and additionally one can
         * also look at what type of transaction (T or S) was active at the
         * time of the signal.
         */
        if (__put_user((msr >> 32), &tm_frame->mc_gregs[PT_MSR]))
                return 1;

#ifdef CONFIG_ALTIVEC
        /* save altivec registers */
        if (current->thread.used_vr) {
                if (__copy_to_user(&frame->mc_vregs, &current->thread.ckvr_state,
                                   ELF_NVRREG * sizeof(vector128)))
                        return 1;
                if (msr & MSR_VEC) {
                        if (__copy_to_user(&tm_frame->mc_vregs,
                                           &current->thread.vr_state,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                } else {
                        if (__copy_to_user(&tm_frame->mc_vregs,
                                           &current->thread.ckvr_state,
                                           ELF_NVRREG * sizeof(vector128)))
                                return 1;
                }

                /* set MSR_VEC in the saved MSR value to indicate that
                 * frame->mc_vregs contains valid data
                 */
                msr |= MSR_VEC;
        }

        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec. Since VSCR only contains 32 bits saved in the least
         * significant bits of a vector, we "cheat" and stuff VRSAVE in the
         * most significant bits of that same vector. --BenH
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                current->thread.ckvrsave = mfspr(SPRN_VRSAVE);
        if (__put_user(current->thread.ckvrsave,
                       (u32 __user *)&frame->mc_vregs[32]))
                return 1;
        if (msr & MSR_VEC) {
                if (__put_user(current->thread.vrsave,
                               (u32 __user *)&tm_frame->mc_vregs[32]))
                        return 1;
        } else {
                if (__put_user(current->thread.ckvrsave,
                               (u32 __user *)&tm_frame->mc_vregs[32]))
                        return 1;
        }
#endif /* CONFIG_ALTIVEC */

        if (copy_ckfpr_to_user(&frame->mc_fregs, current))
                return 1;
        if (msr & MSR_FP) {
                if (copy_fpr_to_user(&tm_frame->mc_fregs, current))
                        return 1;
        } else {
                if (copy_ckfpr_to_user(&tm_frame->mc_fregs, current))
                        return 1;
        }

#ifdef CONFIG_VSX
        /*
         * Copy VSR 0-31 upper half from thread_struct to local
         * buffer, then write that to userspace.  Also set MSR_VSX in
         * the saved MSR value to indicate that frame->mc_vregs
         * contains valid data
         */
        if (current->thread.used_vsr) {
                if (copy_ckvsx_to_user(&frame->mc_vsregs, current))
                        return 1;
                if (msr & MSR_VSX) {
                        if (copy_vsx_to_user(&tm_frame->mc_vsregs,
                                                      current))
                                return 1;
                } else {
                        if (copy_ckvsx_to_user(&tm_frame->mc_vsregs, current))
                                return 1;
                }

                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */
#ifdef CONFIG_SPE
        /* SPE regs are not checkpointed with TM, so this section is
         * simply the same as in save_user_regs().
         */
        if (current->thread.used_spe) {
                flush_spe_to_thread(current);
                if (__copy_to_user(&frame->mc_vregs, current->thread.evr,
                                   ELF_NEVRREG * sizeof(u32)))
                        return 1;
                /* set MSR_SPE in the saved MSR value to indicate that
                 * frame->mc_vregs contains valid data */
                msr |= MSR_SPE;
        }

        /* We always copy to/from spefscr */
        if (__put_user(current->thread.spefscr, (u32 __user *)&frame->mc_vregs + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        if (__put_user(msr, &frame->mc_gregs[PT_MSR]))
                return 1;
        if (sigret) {
                /* Set up the sigreturn trampoline: li r0,sigret; sc */
                if (__put_user(0x38000000UL + sigret, &frame->tramp[0])
                    || __put_user(0x44000002UL, &frame->tramp[1]))
                        return 1;
                flush_icache_range((unsigned long) &frame->tramp[0],
                                   (unsigned long) &frame->tramp[2]);
        }

        return 0;
}
#endif
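
/*
 * Summary of the TM layout produced above: "frame" (the normal
 * mcontext) carries the checkpointed GPR/FP/VEC state plus the low
 * 32 MSR bits, while "tm_frame" carries the transactional (live)
 * state and stashes the top 32 MSR bits in its PT_MSR slot.
 * restore_tm_user_regs() reads that high word back to decide whether
 * a transaction was active and a recheckpoint is required.
 */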

/*
 * Restore the current user register values from the user stack
 * (except for MSR).
 */
static long restore_user_regs(struct pt_regs *regs,
                              struct mcontext __user *sr, int sig)
{
        long err;
        unsigned int save_r2 = 0;
        unsigned long msr;
#ifdef CONFIG_VSX
        int i;
#endif

        /*
         * restore general registers but not including MSR or SOFTE. Also
         * take care of keeping r2 (TLS) intact if not a signal
         */
        if (!sig)
                save_r2 = (unsigned int)regs->gpr[2];
        err = restore_general_regs(regs, sr);
        regs->trap = 0;
        err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
        if (!sig)
                regs->gpr[2] = (unsigned long) save_r2;
        if (err)
                return 1;

        /* if doing signal return, restore the previous endian mode (MSR_LE) */
        if (sig)
                regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
        /*
         * Force the process to reload the altivec registers from
         * current->thread when it next does altivec instructions
         */
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
                /* restore altivec registers from the stack */
                if (__copy_from_user(&current->thread.vr_state, &sr->mc_vregs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
                current->thread.used_vr = true;
        } else if (current->thread.used_vr)
                memset(&current->thread.vr_state, 0,
                       ELF_NVRREG * sizeof(vector128));

        /* Always get VRSAVE back */
        if (__get_user(current->thread.vrsave, (u32 __user *)&sr->mc_vregs[32]))
                return 1;
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
        if (copy_fpr_from_user(current, &sr->mc_fregs))
                return 1;

#ifdef CONFIG_VSX
        /*
         * Force the process to reload the VSX registers from
         * current->thread when it next executes a VSX instruction.
         */
        regs->msr &= ~MSR_VSX;
        if (msr & MSR_VSX) {
                /*
                 * Restore the VSX registers from the stack to a local
                 * buffer, then write this out to the thread_struct
                 */
                if (copy_vsx_from_user(current, &sr->mc_vsregs))
                        return 1;
                current->thread.used_vsr = true;
        } else if (current->thread.used_vsr)
                for (i = 0; i < 32 ; i++)
                        current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif /* CONFIG_VSX */
        /*
         * force the process to reload the FP registers from
         * current->thread when it next does FP instructions
         */
        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

#ifdef CONFIG_SPE
        /* force the process to reload the spe registers from
           current->thread when it next does spe instructions */
        regs->msr &= ~MSR_SPE;
        if (msr & MSR_SPE) {
                /* restore spe registers from the stack */
                if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
                                     ELF_NEVRREG * sizeof(u32)))
                        return 1;
                current->thread.used_spe = true;
        } else if (current->thread.used_spe)
                memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

        /* Always get SPEFSCR back */
        if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the current user register values from the user stack, except for
 * MSR, and recheckpoint the original checkpointed register state for processes
 * in transactions.
 */
static long restore_tm_user_regs(struct pt_regs *regs,
                                 struct mcontext __user *sr,
                                 struct mcontext __user *tm_sr)
{
        long err;
        unsigned long msr, msr_hi;
#ifdef CONFIG_VSX
        int i;
#endif

        /*
         * restore general registers but not including MSR or SOFTE. Also
         * take care of keeping r2 (TLS) intact if not a signal.
         * See comment in signal_64.c:restore_tm_sigcontexts();
         * TFHAR is restored from the checkpointed NIP; TEXASR and TFIAR
         * were set by the signal delivery.
         */
        err = restore_general_regs(regs, tm_sr);
        err |= restore_general_regs(&current->thread.ckpt_regs, sr);

        err |= __get_user(current->thread.tm_tfhar, &sr->mc_gregs[PT_NIP]);

        err |= __get_user(msr, &sr->mc_gregs[PT_MSR]);
        if (err)
                return 1;

        /* Restore the previous endian mode (the MSR_LE bit) */
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

#ifdef CONFIG_ALTIVEC
        regs->msr &= ~MSR_VEC;
        if (msr & MSR_VEC) {
                /* restore altivec registers from the stack */
                if (__copy_from_user(&current->thread.ckvr_state, &sr->mc_vregs,
                                     sizeof(sr->mc_vregs)) ||
                    __copy_from_user(&current->thread.vr_state,
                                     &tm_sr->mc_vregs,
                                     sizeof(sr->mc_vregs)))
                        return 1;
                current->thread.used_vr = true;
        } else if (current->thread.used_vr) {
                memset(&current->thread.vr_state, 0,
                       ELF_NVRREG * sizeof(vector128));
                memset(&current->thread.ckvr_state, 0,
                       ELF_NVRREG * sizeof(vector128));
        }

        /* Always get VRSAVE back */
        if (__get_user(current->thread.ckvrsave,
                       (u32 __user *)&sr->mc_vregs[32]) ||
            __get_user(current->thread.vrsave,
                       (u32 __user *)&tm_sr->mc_vregs[32]))
                return 1;
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                mtspr(SPRN_VRSAVE, current->thread.ckvrsave);
#endif /* CONFIG_ALTIVEC */

        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1);

        if (copy_fpr_from_user(current, &sr->mc_fregs) ||
            copy_ckfpr_from_user(current, &tm_sr->mc_fregs))
                return 1;

#ifdef CONFIG_VSX
        regs->msr &= ~MSR_VSX;
        if (msr & MSR_VSX) {
                /*
                 * Restore the VSX registers from the stack to a local
                 * buffer, then write this out to the thread_struct
                 */
                if (copy_vsx_from_user(current, &tm_sr->mc_vsregs) ||
                    copy_ckvsx_from_user(current, &sr->mc_vsregs))
                        return 1;
                current->thread.used_vsr = true;
        } else if (current->thread.used_vsr)
                for (i = 0; i < 32 ; i++) {
                        current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
                        current->thread.ckfp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
                }
#endif /* CONFIG_VSX */

#ifdef CONFIG_SPE
        /* SPE regs are not checkpointed with TM, so this section is
         * simply the same as in restore_user_regs().
         */
        regs->msr &= ~MSR_SPE;
        if (msr & MSR_SPE) {
                if (__copy_from_user(current->thread.evr, &sr->mc_vregs,
                                     ELF_NEVRREG * sizeof(u32)))
                        return 1;
                current->thread.used_spe = true;
        } else if (current->thread.used_spe)
                memset(current->thread.evr, 0, ELF_NEVRREG * sizeof(u32));

        /* Always get SPEFSCR back */
        if (__get_user(current->thread.spefscr, (u32 __user *)&sr->mc_vregs
                       + ELF_NEVRREG))
                return 1;
#endif /* CONFIG_SPE */

        /* Get the top half of the MSR from the user context */
        if (__get_user(msr_hi, &tm_sr->mc_gregs[PT_MSR]))
                return 1;
        msr_hi <<= 32;
        /* If TM bits are set to the reserved value, it's an invalid context */
        if (MSR_TM_RESV(msr_hi))
                return 1;
        /* Pull in the MSR TM bits from the user context */
        regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr_hi & MSR_TS_MASK);
        /* Now, recheckpoint.  This loads up all of the checkpointed (older)
         * registers, including FP and V[S]Rs.  After recheckpointing, the
         * transactional versions should be loaded.
         */
        tm_enable();
        /* Make sure the transaction is marked as failed */
        current->thread.tm_texasr |= TEXASR_FS;
        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&current->thread, msr);

        /* This loads the speculative FP/VEC state, if used */
        msr_check_and_set(msr & (MSR_FP | MSR_VEC));
        if (msr & MSR_FP) {
                load_fp_state(&current->thread.fp_state);
                regs->msr |= (MSR_FP | current->thread.fpexc_mode);
        }
#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                load_vr_state(&current->thread.vr_state);
                regs->msr |= MSR_VEC;
        }
#endif

        return 0;
}
#endif

#ifdef CONFIG_PPC64
int copy_siginfo_to_user32(struct compat_siginfo __user *d, const siginfo_t *s)
{
        int err;

        if (!access_ok(VERIFY_WRITE, d, sizeof(*d)))
                return -EFAULT;

        /* If you change siginfo_t structure, please be sure
         * this code is fixed accordingly.
         * It should never copy any pad contained in the structure
         * to avoid security leaks, but must copy the generic
         * 3 ints plus the relevant union member.
         * This routine must convert siginfo from 64bit to 32bit as well
         * at the same time.
         */
        err = __put_user(s->si_signo, &d->si_signo);
        err |= __put_user(s->si_errno, &d->si_errno);
        err |= __put_user(s->si_code, &d->si_code);
        if (s->si_code < 0)
                err |= __copy_to_user(&d->_sifields._pad, &s->_sifields._pad,
                                      SI_PAD_SIZE32);
        else switch (siginfo_layout(s->si_signo, s->si_code)) {
        case SIL_CHLD:
                err |= __put_user(s->si_pid, &d->si_pid);
                err |= __put_user(s->si_uid, &d->si_uid);
                err |= __put_user(s->si_utime, &d->si_utime);
                err |= __put_user(s->si_stime, &d->si_stime);
                err |= __put_user(s->si_status, &d->si_status);
                break;
        case SIL_FAULT:
                err |= __put_user((unsigned int)(unsigned long)s->si_addr,
                                  &d->si_addr);
                break;
        case SIL_POLL:
                err |= __put_user(s->si_band, &d->si_band);
                err |= __put_user(s->si_fd, &d->si_fd);
                break;
        case SIL_TIMER:
                err |= __put_user(s->si_tid, &d->si_tid);
                err |= __put_user(s->si_overrun, &d->si_overrun);
                err |= __put_user(s->si_int, &d->si_int);
                break;
        case SIL_SYS:
                err |= __put_user(ptr_to_compat(s->si_call_addr), &d->si_call_addr);
                err |= __put_user(s->si_syscall, &d->si_syscall);
                err |= __put_user(s->si_arch, &d->si_arch);
                break;
        case SIL_RT:
                err |= __put_user(s->si_int, &d->si_int);
                /* fallthrough */
        case SIL_KILL:
                err |= __put_user(s->si_pid, &d->si_pid);
                err |= __put_user(s->si_uid, &d->si_uid);
                break;
        }
        return err;
}

#define copy_siginfo_to_user    copy_siginfo_to_user32

int copy_siginfo_from_user32(siginfo_t *to, struct compat_siginfo __user *from)
{
        if (copy_from_user(to, from, 3*sizeof(int)) ||
            copy_from_user(to->_sifields._pad,
                           from->_sifields._pad, SI_PAD_SIZE32))
                return -EFAULT;

        return 0;
}
#endif /* CONFIG_PPC64 */

/*
 * Set up a signal frame for a "real-time" signal handler
 * (one which gets siginfo).
 */
int handle_rt_signal32(struct ksignal *ksig, sigset_t *oldset,
                       struct task_struct *tsk)
{
        struct rt_sigframe __user *rt_sf;
        struct mcontext __user *frame;
        struct mcontext __user *tm_frame = NULL;
        void __user *addr;
        unsigned long newsp = 0;
        int sigret;
        unsigned long tramp;
        struct pt_regs *regs = tsk->thread.regs;

        BUG_ON(tsk != current);

        /* Set up Signal Frame */
        /* Put a Real Time Context onto stack */
        rt_sf = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*rt_sf), 1);
        addr = rt_sf;
        if (unlikely(rt_sf == NULL))
                goto badframe;

        /* Put the siginfo & fill in most of the ucontext */
        if (copy_siginfo_to_user(&rt_sf->info, &ksig->info)
            || __put_user(0, &rt_sf->uc.uc_flags)
            || __save_altstack(&rt_sf->uc.uc_stack, regs->gpr[1])
            || __put_user(to_user_ptr(&rt_sf->uc.uc_mcontext),
                    &rt_sf->uc.uc_regs)
            || put_sigset_t(&rt_sf->uc.uc_sigmask, oldset))
                goto badframe;

        /* Save user registers on the stack */
        frame = &rt_sf->uc.uc_mcontext;
        addr = frame;
        if (vdso32_rt_sigtramp && tsk->mm->context.vdso_base) {
                sigret = 0;
                tramp = tsk->mm->context.vdso_base + vdso32_rt_sigtramp;
        } else {
                sigret = __NR_rt_sigreturn;
                tramp = (unsigned long) frame->tramp;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        tm_frame = &rt_sf->uc_transact.uc_mcontext;
        if (MSR_TM_ACTIVE(regs->msr)) {
                if (__put_user((unsigned long)&rt_sf->uc_transact,
                               &rt_sf->uc.uc_link) ||
                    __put_user((unsigned long)tm_frame,
                               &rt_sf->uc_transact.uc_regs))
                        goto badframe;
                if (save_tm_user_regs(regs, frame, tm_frame, sigret))
                        goto badframe;
        }
        else
#endif
        {
                if (__put_user(0, &rt_sf->uc.uc_link))
                        goto badframe;
                if (save_user_regs(regs, frame, tm_frame, sigret, 1))
                        goto badframe;
        }
        regs->link = tramp;

        tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */

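        /*
         * The word stored at newsp below is the PPC32 ABI back-chain:
         * the handler runs with r1 == newsp, and the saved old r1 lets
         * the handler's prologue and stack unwinders walk back to the
         * interrupted frame.  The extra 16 bytes keep siginfo and the
         * ucontext at the offsets older kernels used.
         */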
        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)rt_sf) - (__SIGNAL_FRAMESIZE + 16);
        addr = (void __user *)regs->gpr[1];
        if (put_user(regs->gpr[1], (u32 __user *)newsp))
                goto badframe;

        /* Fill registers for signal handler */
        regs->gpr[1] = newsp;
        regs->gpr[3] = ksig->sig;
        regs->gpr[4] = (unsigned long) &rt_sf->info;
        regs->gpr[5] = (unsigned long) &rt_sf->uc;
        regs->gpr[6] = (unsigned long) rt_sf;
        regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
        /* enter the signal handler in native-endian mode */
        regs->msr &= ~MSR_LE;
        regs->msr |= (MSR_KERNEL & MSR_LE);
        return 0;

badframe:
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_rt_signal32: "
                                   "%p nip %08lx lr %08lx\n",
                                   tsk->comm, tsk->pid,
                                   addr, regs->nip, regs->link);

        return 1;
}

static int do_setcontext(struct ucontext __user *ucp, struct pt_regs *regs, int sig)
{
        sigset_t set;
        struct mcontext __user *mcp;

        if (get_sigset_t(&set, &ucp->uc_sigmask))
                return -EFAULT;
#ifdef CONFIG_PPC64
        {
                u32 cmcp;

                if (__get_user(cmcp, &ucp->uc_regs))
                        return -EFAULT;
                mcp = (struct mcontext __user *)(u64)cmcp;
                /* no need to check access_ok(mcp), since mcp < 4GB */
        }
#else
        if (__get_user(mcp, &ucp->uc_regs))
                return -EFAULT;
        if (!access_ok(VERIFY_READ, mcp, sizeof(*mcp)))
                return -EFAULT;
#endif
        set_current_blocked(&set);
        if (restore_user_regs(regs, mcp, sig))
                return -EFAULT;

        return 0;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
static int do_setcontext_tm(struct ucontext __user *ucp,
                            struct ucontext __user *tm_ucp,
                            struct pt_regs *regs)
{
        sigset_t set;
        struct mcontext __user *mcp;
        struct mcontext __user *tm_mcp;
        u32 cmcp;
        u32 tm_cmcp;

        if (get_sigset_t(&set, &ucp->uc_sigmask))
                return -EFAULT;

        if (__get_user(cmcp, &ucp->uc_regs) ||
            __get_user(tm_cmcp, &tm_ucp->uc_regs))
                return -EFAULT;
        mcp = (struct mcontext __user *)(u64)cmcp;
        tm_mcp = (struct mcontext __user *)(u64)tm_cmcp;
        /* no need to check access_ok(mcp), since mcp < 4GB */

        set_current_blocked(&set);
        if (restore_tm_user_regs(regs, mcp, tm_mcp))
                return -EFAULT;

        return 0;
}
#endif

long sys_swapcontext(struct ucontext __user *old_ctx,
                     struct ucontext __user *new_ctx,
                     int ctx_size, int r6, int r7, int r8, struct pt_regs *regs)
{
        unsigned char tmp;
        int ctx_has_vsx_region = 0;

#ifdef CONFIG_PPC64
        unsigned long new_msr = 0;

        if (new_ctx) {
                struct mcontext __user *mcp;
                u32 cmcp;

                /*
                 * Get pointer to the real mcontext.  No need for
                 * access_ok since we are dealing with compat
                 * pointers.
                 */
                if (__get_user(cmcp, &new_ctx->uc_regs))
                        return -EFAULT;
                mcp = (struct mcontext __user *)(u64)cmcp;
                if (__get_user(new_msr, &mcp->mc_gregs[PT_MSR]))
                        return -EFAULT;
        }
        /*
         * Check that the context is not smaller than the original
         * size (with VMX but without VSX)
         */
        if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
                return -EINVAL;
        /*
         * Reject a new context that sets the MSR VSX bits but is too
         * small to actually carry VSX state.
         */
        if ((ctx_size < sizeof(struct ucontext)) &&
            (new_msr & MSR_VSX))
                return -EINVAL;
        /* Does the context have enough room to store VSX data? */
        if (ctx_size >= sizeof(struct ucontext))
                ctx_has_vsx_region = 1;
#else
        /* Context size is for future use. Right now, we only make sure
         * we are passed something we understand
         */
        if (ctx_size < sizeof(struct ucontext))
                return -EINVAL;
#endif
        if (old_ctx != NULL) {
                struct mcontext __user *mctx;

                /*
                 * old_ctx might not be 16-byte aligned, in which
                 * case old_ctx->uc_mcontext won't be either.
                 * Because we have the old_ctx->uc_pad2 field
                 * before old_ctx->uc_mcontext, we need to round down
                 * from &old_ctx->uc_mcontext to a 16-byte boundary.
                 */
                mctx = (struct mcontext __user *)
                        ((unsigned long) &old_ctx->uc_mcontext & ~0xfUL);
                if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
                    || save_user_regs(regs, mctx, NULL, 0, ctx_has_vsx_region)
                    || put_sigset_t(&old_ctx->uc_sigmask, &current->blocked)
                    || __put_user(to_user_ptr(mctx), &old_ctx->uc_regs))
                        return -EFAULT;
        }
        if (new_ctx == NULL)
                return 0;
        if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
            || __get_user(tmp, (u8 __user *) new_ctx)
            || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
                return -EFAULT;

        /*
         * If we get a fault copying the context into the kernel's
         * image of the user's registers, we can't just return -EFAULT
         * because the user's registers will be corrupted.  For instance
         * the NIP value may have been updated but not some of the
         * other registers.  Given that we have done the access_ok
         * and successfully read the first and last bytes of the region
         * above, this should only happen in an out-of-memory situation
         * or if another thread unmaps the region containing the context.
         * We kill the task with a SIGSEGV in this situation.
         */
        if (do_setcontext(new_ctx, regs, 0))
                do_exit(SIGSEGV);

        set_thread_flag(TIF_RESTOREALL);
        return 0;
}

long sys_rt_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                     struct pt_regs *regs)
{
        struct rt_sigframe __user *rt_sf;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct ucontext __user *uc_transact;
        unsigned long msr_hi;
        unsigned long tmp;
        int tm_restore = 0;
#endif
        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        rt_sf = (struct rt_sigframe __user *)
                (regs->gpr[1] + __SIGNAL_FRAMESIZE + 16);
        if (!access_ok(VERIFY_READ, rt_sf, sizeof(*rt_sf)))
                goto bad;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * If there is a transactional state then throw it away.
         * The purpose of a sigreturn is to destroy all traces of the
         * signal frame, and that includes any transactional state created
         * within it.  We only check for suspended, as we can never be
         * active in the kernel; if we somehow are, there is nothing better
         * to do than go ahead and Bad Thing later.
         * The cause is not important, as there will never be a
         * recheckpoint so it's not user visible.
         */
        if (MSR_TM_SUSPENDED(mfmsr()))
                tm_reclaim_current(0);

        if (__get_user(tmp, &rt_sf->uc.uc_link))
                goto bad;
        uc_transact = (struct ucontext __user *)(uintptr_t)tmp;
        if (uc_transact) {
                u32 cmcp;
                struct mcontext __user *mcp;

                if (__get_user(cmcp, &uc_transact->uc_regs))
                        return -EFAULT;
                mcp = (struct mcontext __user *)(u64)cmcp;
                /* The top 32 bits of the MSR are stashed in the transactional
                 * ucontext. */
                if (__get_user(msr_hi, &mcp->mc_gregs[PT_MSR]))
                        goto bad;

                if (MSR_TM_ACTIVE(msr_hi<<32)) {
                        /* We only recheckpoint on return if the
                         * transaction was active.
                         */
                        tm_restore = 1;
                        if (do_setcontext_tm(&rt_sf->uc, uc_transact, regs))
                                goto bad;
                }
        }
        if (!tm_restore)
                /* Fall through, for non-TM restore */
#endif
        if (do_setcontext(&rt_sf->uc, regs, 1))
                goto bad;

        /*
         * It's not clear whether or why it is desirable to save the
         * sigaltstack setting on signal delivery and restore it on
         * signal return.  But other architectures do this and we have
         * always done it up until now so it is probably better not to
         * change it.  -- paulus
         */
#ifdef CONFIG_PPC64
        if (compat_restore_altstack(&rt_sf->uc.uc_stack))
                goto bad;
#else
        if (restore_altstack(&rt_sf->uc.uc_stack))
                goto bad;
#endif
        set_thread_flag(TIF_RESTOREALL);
        return 0;

 bad:
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in sys_rt_sigreturn: "
                                   "%p nip %08lx lr %08lx\n",
                                   current->comm, current->pid,
                                   rt_sf, regs->nip, regs->link);

        force_sig(SIGSEGV, current);
        return 0;
}

#ifdef CONFIG_PPC32
int sys_debug_setcontext(struct ucontext __user *ctx,
                         int ndbg, struct sig_dbg_op __user *dbg,
                         int r6, int r7, int r8,
                         struct pt_regs *regs)
{
        struct sig_dbg_op op;
        int i;
        unsigned char tmp;
        unsigned long new_msr = regs->msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        unsigned long new_dbcr0 = current->thread.debug.dbcr0;
#endif

        for (i = 0; i < ndbg; i++) {
                if (copy_from_user(&op, dbg + i, sizeof(op)))
                        return -EFAULT;
                switch (op.dbg_type) {
                case SIG_DBG_SINGLE_STEPPING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
                        if (op.dbg_value) {
                                new_msr |= MSR_DE;
                                new_dbcr0 |= (DBCR0_IDM | DBCR0_IC);
                        } else {
                                new_dbcr0 &= ~DBCR0_IC;
                                if (!DBCR_ACTIVE_EVENTS(new_dbcr0,
                                                current->thread.debug.dbcr1)) {
                                        new_msr &= ~MSR_DE;
                                        new_dbcr0 &= ~DBCR0_IDM;
                                }
                        }
#else
                        if (op.dbg_value)
                                new_msr |= MSR_SE;
                        else
                                new_msr &= ~MSR_SE;
#endif
                        break;
                case SIG_DBG_BRANCH_TRACING:
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
                        return -EINVAL;
#else
                        if (op.dbg_value)
                                new_msr |= MSR_BE;
                        else
                                new_msr &= ~MSR_BE;
#endif
                        break;

                default:
                        return -EINVAL;
                }
        }

        /* We wait until here to actually install the values in the
           registers so if we fail in the above loop, it will not
           affect the contents of these registers.  After this point,
           failure is a problem, anyway, and it's very unlikely unless
           the user is really doing something wrong. */
        regs->msr = new_msr;
#ifdef CONFIG_PPC_ADV_DEBUG_REGS
        current->thread.debug.dbcr0 = new_dbcr0;
#endif

        if (!access_ok(VERIFY_READ, ctx, sizeof(*ctx))
            || __get_user(tmp, (u8 __user *) ctx)
            || __get_user(tmp, (u8 __user *) (ctx + 1) - 1))
                return -EFAULT;

        /*
         * If we get a fault copying the context into the kernel's
         * image of the user's registers, we can't just return -EFAULT
         * because the user's registers will be corrupted.  For instance
         * the NIP value may have been updated but not some of the
         * other registers.  Given that we have done the access_ok
         * and successfully read the first and last bytes of the region
         * above, this should only happen in an out-of-memory situation
         * or if another thread unmaps the region containing the context.
         * We kill the task with a SIGSEGV in this situation.
         */
        if (do_setcontext(ctx, regs, 1)) {
                if (show_unhandled_signals)
                        printk_ratelimited(KERN_INFO "%s[%d]: bad frame in "
                                           "sys_debug_setcontext: %p nip %08lx "
                                           "lr %08lx\n",
                                           current->comm, current->pid,
                                           ctx, regs->nip, regs->link);

                force_sig(SIGSEGV, current);
                goto out;
        }

        /*
         * It's not clear whether or why it is desirable to save the
         * sigaltstack setting on signal delivery and restore it on
         * signal return.  But other architectures do this and we have
         * always done it up until now so it is probably better not to
         * change it.  -- paulus
         */
        restore_altstack(&ctx->uc_stack);

        set_thread_flag(TIF_RESTOREALL);
 out:
        return 0;
}
#endif

/*
 * OK, we're invoking a handler
 */
int handle_signal32(struct ksignal *ksig, sigset_t *oldset,
                struct task_struct *tsk)
{
        struct sigcontext __user *sc;
        struct sigframe __user *frame;
        struct mcontext __user *tm_mctx = NULL;
        unsigned long newsp = 0;
        int sigret;
        unsigned long tramp;
        struct pt_regs *regs = tsk->thread.regs;

        BUG_ON(tsk != current);

        /* Set up Signal Frame */
        frame = get_sigframe(ksig, get_tm_stackpointer(tsk), sizeof(*frame), 1);
        if (unlikely(frame == NULL))
                goto badframe;
        sc = (struct sigcontext __user *) &frame->sctx;

#if _NSIG != 64
#error "Please adjust handle_signal()"
#endif
        if (__put_user(to_user_ptr(ksig->ka.sa.sa_handler), &sc->handler)
            || __put_user(oldset->sig[0], &sc->oldmask)
#ifdef CONFIG_PPC64
            || __put_user((oldset->sig[0] >> 32), &sc->_unused[3])
#else
            || __put_user(oldset->sig[1], &sc->_unused[3])
#endif
            || __put_user(to_user_ptr(&frame->mctx), &sc->regs)
            || __put_user(ksig->sig, &sc->signal))
                goto badframe;

        if (vdso32_sigtramp && tsk->mm->context.vdso_base) {
                sigret = 0;
                tramp = tsk->mm->context.vdso_base + vdso32_sigtramp;
        } else {
                sigret = __NR_sigreturn;
                tramp = (unsigned long) frame->mctx.tramp;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        tm_mctx = &frame->mctx_transact;
        if (MSR_TM_ACTIVE(regs->msr)) {
                if (save_tm_user_regs(regs, &frame->mctx, &frame->mctx_transact,
                                      sigret))
                        goto badframe;
        }
        else
#endif
        {
                if (save_user_regs(regs, &frame->mctx, tm_mctx, sigret, 1))
                        goto badframe;
        }

        regs->link = tramp;

        tsk->thread.fp_state.fpscr = 0; /* turn off all fp exceptions */

        /* create a stack frame for the caller of the handler */
        newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
        if (put_user(regs->gpr[1], (u32 __user *)newsp))
                goto badframe;

        regs->gpr[1] = newsp;
        regs->gpr[3] = ksig->sig;
        regs->gpr[4] = (unsigned long) sc;
        regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
        /* enter the signal handler in big-endian mode */
        regs->msr &= ~MSR_LE;
        return 0;

badframe:
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in handle_signal32: "
                                   "%p nip %08lx lr %08lx\n",
                                   tsk->comm, tsk->pid,
                                   frame, regs->nip, regs->link);

        return 1;
}

/*
 * Do a signal return; undo the signal stack.
 */
long sys_sigreturn(int r3, int r4, int r5, int r6, int r7, int r8,
                       struct pt_regs *regs)
{
        struct sigframe __user *sf;
        struct sigcontext __user *sc;
        struct sigcontext sigctx;
        struct mcontext __user *sr;
        void __user *addr;
        sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct mcontext __user *mcp, *tm_mcp;
        unsigned long msr_hi;
#endif

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        sf = (struct sigframe __user *)(regs->gpr[1] + __SIGNAL_FRAMESIZE);
        sc = &sf->sctx;
        addr = sc;
        if (copy_from_user(&sigctx, sc, sizeof(sigctx)))
                goto badframe;

#ifdef CONFIG_PPC64
        /*
         * Note that PPC32 puts the upper 32 bits of the sigmask in the
         * unused part of the signal stackframe
         */
        set.sig[0] = sigctx.oldmask + ((long)(sigctx._unused[3]) << 32);
#else
        set.sig[0] = sigctx.oldmask;
        set.sig[1] = sigctx._unused[3];
#endif
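        /*
         * Worked example of the reassembly above (compat case): if
         * handle_signal32() stored oldmask == 0x00000200 and
         * _unused[3] == 0x00000001, set.sig[0] becomes
         * 0x0000000100000200, undoing the 32-bit split made at
         * delivery time.
         */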
        set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        mcp = (struct mcontext __user *)&sf->mctx;
        tm_mcp = (struct mcontext __user *)&sf->mctx_transact;
        if (__get_user(msr_hi, &tm_mcp->mc_gregs[PT_MSR]))
                goto badframe;
        if (MSR_TM_ACTIVE(msr_hi<<32)) {
                if (!cpu_has_feature(CPU_FTR_TM))
                        goto badframe;
                if (restore_tm_user_regs(regs, mcp, tm_mcp))
                        goto badframe;
        } else
#endif
        {
                sr = (struct mcontext __user *)from_user_ptr(sigctx.regs);
                addr = sr;
                if (!access_ok(VERIFY_READ, sr, sizeof(*sr))
                    || restore_user_regs(regs, sr, 1))
                        goto badframe;
        }

        set_thread_flag(TIF_RESTOREALL);
        return 0;

badframe:
        if (show_unhandled_signals)
                printk_ratelimited(KERN_INFO
                                   "%s[%d]: bad frame in sys_sigreturn: "
                                   "%p nip %08lx lr %08lx\n",
                                   current->comm, current->pid,
                                   addr, regs->nip, regs->link);

        force_sig(SIGSEGV, current);
        return 0;
}