linux/arch/powerpc/kernel/signal_64.c
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *
 *  Derived from "arch/i386/kernel/signal.c"
 *    Copyright (C) 1991, 1992 Linus Torvalds
 *    1997-11-28  Modified for POSIX.1b signals by Richard Henderson
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/smp.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/elf.h>
#include <linux/ptrace.h>
#include <linux/ratelimit.h>

#include <asm/sigcontext.h>
#include <asm/ucontext.h>
#include <asm/uaccess.h>
#include <asm/pgtable.h>
#include <asm/unistd.h>
#include <asm/cacheflush.h>
#include <asm/syscalls.h>
#include <asm/vdso.h>
#include <asm/switch_to.h>
#include <asm/tm.h>

#include "signal.h"

#define GP_REGS_SIZE    min(sizeof(elf_gregset_t), sizeof(struct pt_regs))
#define FP_REGS_SIZE    sizeof(elf_fpregset_t)

#define TRAMP_TRACEBACK 3
#define TRAMP_SIZE      6

/*
 * When we have signals to deliver, we set up on the user stack,
 * going down from the original stack pointer:
 *      1) a rt_sigframe struct which contains the ucontext
 *      2) a gap of __SIGNAL_FRAMESIZE bytes which acts as a dummy caller
 *         frame for the signal handler.
 */
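
/*
 * A rough picture of the result (addresses grow downward; this is an
 * illustrative sketch of the ordering only, not of exact offsets):
 *
 *      old regs->gpr[1] ->  [ interrupted code's stack           ]
 *                           [ abigap: redzone of the interrupted ]
 *                           [   code, left untouched             ]
 *                           [ rest of struct rt_sigframe:        ]
 *                           [   info, puc, pinfo, tramp, ...     ]
 *      frame ->             [ uc (first member)                  ]
 *                           [ __SIGNAL_FRAMESIZE dummy frame     ]
 *      new regs->gpr[1] ->  [ back-chain to old regs->gpr[1]     ]
 */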

struct rt_sigframe {
        /* sys_rt_sigreturn requires the ucontext be the first field */
        struct ucontext uc;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        struct ucontext uc_transact;
#endif
        unsigned long _unused[2];
        unsigned int tramp[TRAMP_SIZE];
        struct siginfo __user *pinfo;
        void __user *puc;
        struct siginfo info;
        /* New 64 bit little-endian ABI allows redzone of 512 bytes below sp */
        char abigap[USER_REDZONE_SIZE];
} __attribute__ ((aligned (16)));

static const char fmt32[] = KERN_INFO
        "%s[%d]: bad frame in %s: %08lx nip %08lx lr %08lx\n";
static const char fmt64[] = KERN_INFO
        "%s[%d]: bad frame in %s: %016lx nip %016lx lr %016lx\n";

/*
 * This computes a quad word aligned pointer inside the vmx_reserve array
 * element. For historical reasons sigcontext might not be quad word aligned,
 * but the location we write the VMX regs to must be. See the comment in
 * sigcontext for more detail.
 */
#ifdef CONFIG_ALTIVEC
static elf_vrreg_t __user *sigcontext_vmx_regs(struct sigcontext __user *sc)
{
        return (elf_vrreg_t __user *) (((unsigned long)sc->vmx_reserve + 15) & ~0xful);
}
#endif
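
/*
 * Worked example of the round-up idiom above (illustration only):
 * adding 15 and masking off the low four bits rounds any address up
 * to the next 16-byte boundary, e.g.
 *
 *      0x...7008 -> (+15) 0x...7017 -> (& ~0xf) 0x...7010
 *      0x...7010 -> (+15) 0x...701f -> (& ~0xf) 0x...7010  (already aligned)
 */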

/*
 * Set up the sigcontext for the signal frame.
 */

static long setup_sigcontext(struct sigcontext __user *sc, struct pt_regs *regs,
                 int signr, sigset_t *set, unsigned long handler,
                 int ctx_has_vsx_region)
{
        /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
         * process never used altivec yet (MSR_VEC is zero in pt_regs of
         * the context). This is very important because we must ensure we
         * don't lose the VRSAVE content that may have been set prior to
         * the process doing its first vector operation.
         * Userland shall check AT_HWCAP to know whether it can rely on the
         * v_regs pointer or not.
         */
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
        unsigned long vrsave;
#endif
        unsigned long msr = regs->msr;
        long err = 0;

#ifdef CONFIG_ALTIVEC
        err |= __put_user(v_regs, &sc->v_regs);

        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
                err |= __copy_to_user(v_regs, &current->thread.vr_state,
                                      33 * sizeof(vector128));
                /* set MSR_VEC in the MSR value in the frame to indicate that
                 * sc->v_regs contains valid data.
                 */
                msr |= MSR_VEC;
        }
        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec.
         */
        vrsave = 0;
        if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
                vrsave = mfspr(SPRN_VRSAVE);
                current->thread.vrsave = vrsave;
        }

        err |= __put_user(vrsave, (u32 __user *)&v_regs[33]);
#else /* CONFIG_ALTIVEC */
        err |= __put_user(0, &sc->v_regs);
#endif /* CONFIG_ALTIVEC */
        flush_fp_to_thread(current);
        /* copy fpr regs and fpscr */
        err |= copy_fpr_to_user(&sc->fp_regs, current);

        /*
         * Clear the MSR VSX bit to indicate there is no valid state attached
         * to this context, except in the specific case below where we set it.
         */
        msr &= ~MSR_VSX;
#ifdef CONFIG_VSX
        /*
         * Copy VSX low doubleword to local buffer for formatting,
         * then out to userspace.  Update v_regs to point after the
         * VMX data.
         */
        if (current->thread.used_vsr && ctx_has_vsx_region) {
                flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                err |= copy_vsx_to_user(v_regs, current);
                /* set MSR_VSX in the MSR value in the frame to indicate
                 * that the frame's VSX region contains valid data.
                 */
                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */
        err |= __put_user(&sc->gp_regs, &sc->regs);
        WARN_ON(!FULL_REGS(regs));
        err |= __copy_to_user(&sc->gp_regs, regs, GP_REGS_SIZE);
        err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
        err |= __put_user(signr, &sc->signal);
        err |= __put_user(handler, &sc->handler);
        if (set != NULL)
                err |= __put_user(set->sig[0], &sc->oldmask);

        return err;
}
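
/*
 * Illustrative userland view of the context laid out above (a sketch,
 * not part of this file; field names follow asm/sigcontext.h and
 * asm/ptrace.h):
 *
 *      void handler(int sig, siginfo_t *info, void *ctx)
 *      {
 *              ucontext_t *uc = ctx;
 *              struct sigcontext *sc = &uc->uc_mcontext;
 *
 *              if (sc->gp_regs[PT_MSR] & MSR_VEC)
 *                      ;       // sc->v_regs holds vr0..vr31, vscr, vrsave
 *              if (sc->gp_regs[PT_MSR] & MSR_VSX)
 *                      ;       // VSX doublewords follow the VMX data
 *      }
 */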

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * As above, but Transactional Memory is in use, so deliver sigcontexts
 * containing checkpointed and transactional register states.
 *
 * To do this, we treclaim (done before entering here) to gather both sets of
 * registers and set up the 'normal' sigcontext registers with rolled-back
 * register values such that a simple signal handler sees a correct
 * checkpointed register state.  If interested, a TM-aware sighandler can
 * examine the transactional registers in the 2nd sigcontext to determine the
 * real origin of the signal.
 */
static long setup_tm_sigcontexts(struct sigcontext __user *sc,
                                 struct sigcontext __user *tm_sc,
                                 struct pt_regs *regs,
                                 int signr, sigset_t *set, unsigned long handler)
{
        /* When CONFIG_ALTIVEC is set, we _always_ setup v_regs even if the
         * process never used altivec yet (MSR_VEC is zero in pt_regs of
         * the context). This is very important because we must ensure we
         * don't lose the VRSAVE content that may have been set prior to
         * the process doing its first vector operation.
         * Userland shall check AT_HWCAP to know whether it can rely on the
         * v_regs pointer or not.
         */
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs = sigcontext_vmx_regs(sc);
        elf_vrreg_t __user *tm_v_regs = sigcontext_vmx_regs(tm_sc);
#endif
        unsigned long msr = regs->msr;
        long err = 0;

        BUG_ON(!MSR_TM_ACTIVE(regs->msr));

        /* Remove TM bits from thread's MSR.  The MSR in the sigcontext
         * just indicates to userland that we were doing a transaction, but we
         * don't want to return in transactional state.  This also ensures
         * that flush_fp_to_thread won't set TIF_RESTORE_TM again.
         */
        regs->msr &= ~MSR_TS_MASK;

        flush_fp_to_thread(current);

#ifdef CONFIG_ALTIVEC
        err |= __put_user(v_regs, &sc->v_regs);
        err |= __put_user(tm_v_regs, &tm_sc->v_regs);

        /* save altivec registers */
        if (current->thread.used_vr) {
                flush_altivec_to_thread(current);
                /* Copy 33 vec registers (vr0..31 and vscr) to the stack */
                err |= __copy_to_user(v_regs, &current->thread.vr_state,
                                      33 * sizeof(vector128));
                /* If VEC was enabled there are transactional VRs valid too,
                 * else they're a copy of the checkpointed VRs.
                 */
                if (msr & MSR_VEC)
                        err |= __copy_to_user(tm_v_regs,
                                              &current->thread.transact_vr,
                                              33 * sizeof(vector128));
                else
                        err |= __copy_to_user(tm_v_regs,
                                              &current->thread.vr_state,
                                              33 * sizeof(vector128));

                /* set MSR_VEC in the MSR value in the frame to indicate
                 * that sc->v_regs contains valid data.
                 */
                msr |= MSR_VEC;
        }
        /* We always copy to/from vrsave, it's 0 if we don't have or don't
         * use altivec.
         */
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                current->thread.vrsave = mfspr(SPRN_VRSAVE);
        err |= __put_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
        if (msr & MSR_VEC)
                err |= __put_user(current->thread.transact_vrsave,
                                  (u32 __user *)&tm_v_regs[33]);
        else
                err |= __put_user(current->thread.vrsave,
                                  (u32 __user *)&tm_v_regs[33]);

#else /* CONFIG_ALTIVEC */
        err |= __put_user(0, &sc->v_regs);
        err |= __put_user(0, &tm_sc->v_regs);
#endif /* CONFIG_ALTIVEC */

        /* copy fpr regs and fpscr */
        err |= copy_fpr_to_user(&sc->fp_regs, current);
        if (msr & MSR_FP)
                err |= copy_transact_fpr_to_user(&tm_sc->fp_regs, current);
        else
                err |= copy_fpr_to_user(&tm_sc->fp_regs, current);

#ifdef CONFIG_VSX
        /*
         * Copy VSX low doubleword to local buffer for formatting,
         * then out to userspace.  Update v_regs to point after the
         * VMX data.
         */
        if (current->thread.used_vsr) {
                flush_vsx_to_thread(current);
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;

                err |= copy_vsx_to_user(v_regs, current);

                if (msr & MSR_VSX)
                        err |= copy_transact_vsx_to_user(tm_v_regs, current);
                else
                        err |= copy_vsx_to_user(tm_v_regs, current);

                /* set MSR_VSX in the MSR value in the frame to indicate
                 * that the frame's VSX region contains valid data.
                 */
                msr |= MSR_VSX;
        }
#endif /* CONFIG_VSX */

        err |= __put_user(&sc->gp_regs, &sc->regs);
        err |= __put_user(&tm_sc->gp_regs, &tm_sc->regs);
        WARN_ON(!FULL_REGS(regs));
        err |= __copy_to_user(&tm_sc->gp_regs, regs, GP_REGS_SIZE);
        err |= __copy_to_user(&sc->gp_regs,
                              &current->thread.ckpt_regs, GP_REGS_SIZE);
        err |= __put_user(msr, &tm_sc->gp_regs[PT_MSR]);
        err |= __put_user(msr, &sc->gp_regs[PT_MSR]);
        err |= __put_user(signr, &sc->signal);
        err |= __put_user(handler, &sc->handler);
        if (set != NULL)
                err |= __put_user(set->sig[0], &sc->oldmask);

        return err;
}
#endif
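
/*
 * Illustrative sketch of how a TM-aware handler reaches the second
 * (transactional) context set up above (not part of this file; assumes
 * the usual <ucontext.h> definitions):
 *
 *      void handler(int sig, siginfo_t *info, void *ctx)
 *      {
 *              ucontext_t *uc = ctx;           // checkpointed state
 *              ucontext_t *uc_t = uc->uc_link; // transactional state when
 *                                              // a transaction was active,
 *                                              // else 0 (see
 *                                              // handle_rt_signal64 below)
 *      }
 */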

/*
 * Restore the sigcontext from the signal frame.
 */

static long restore_sigcontext(struct pt_regs *regs, sigset_t *set, int sig,
                              struct sigcontext __user *sc)
{
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs;
#endif
        unsigned long err = 0;
        unsigned long save_r13 = 0;
        unsigned long msr;
#ifdef CONFIG_VSX
        int i;
#endif

        /* If this is not a signal return, we preserve the TLS in r13 */
        if (!sig)
                save_r13 = regs->gpr[13];

        /* copy the GPRs */
        err |= __copy_from_user(regs->gpr, sc->gp_regs, sizeof(regs->gpr));
        err |= __get_user(regs->nip, &sc->gp_regs[PT_NIP]);
        /* get MSR separately, transfer the LE bit if doing signal return */
        err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
        if (sig)
                regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);
        err |= __get_user(regs->orig_gpr3, &sc->gp_regs[PT_ORIG_R3]);
        err |= __get_user(regs->ctr, &sc->gp_regs[PT_CTR]);
        err |= __get_user(regs->link, &sc->gp_regs[PT_LNK]);
        err |= __get_user(regs->xer, &sc->gp_regs[PT_XER]);
        err |= __get_user(regs->ccr, &sc->gp_regs[PT_CCR]);
        /* skip SOFTE */
        regs->trap = 0;
        err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
        err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
        err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

        if (!sig)
                regs->gpr[13] = save_r13;
        if (set != NULL)
                err |= __get_user(set->sig[0], &sc->oldmask);

        /*
         * Force reload of FP/VEC.
         * This has to be done before copying stuff into current->thread.fpr/vr
         * for the reasons explained in the previous comment.
         */
        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
        err |= __get_user(v_regs, &sc->v_regs);
        if (err)
                return err;
        if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && (msr & MSR_VEC) != 0)
                err |= __copy_from_user(&current->thread.vr_state, v_regs,
                                        33 * sizeof(vector128));
        else if (current->thread.used_vr)
                memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
        /* Always get VRSAVE back */
        if (v_regs != NULL)
                err |= __get_user(current->thread.vrsave, (u32 __user *)&v_regs[33]);
        else
                current->thread.vrsave = 0;
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
        /* restore floating point */
        err |= copy_fpr_from_user(current, &sc->fp_regs);
#ifdef CONFIG_VSX
        /*
         * Get additional VSX data. Update v_regs to point after the
         * VMX data.  Copy VSX low doubleword from userspace to local
         * buffer for formatting, then into the taskstruct.
         */
        v_regs += ELF_NVRREG;
        if ((msr & MSR_VSX) != 0)
                err |= copy_vsx_from_user(current, v_regs);
        else
                for (i = 0; i < 32; i++)
                        current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
#endif
        return err;
}
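
/*
 * For reference, the layout of the v_regs region that the routines above
 * copy in and out (an illustrative summary of the code, with
 * ELF_NVRREG == 34 on ppc64):
 *
 *      v_regs[0..31]   vr0..vr31       (one vector128 each)
 *      v_regs[32]      vscr
 *      v_regs[33]      vrsave          (stored as a u32)
 *      v_regs[34...]   32 doublewords: the low halves of vsr0..vsr31,
 *                      present only when MSR_VSX is set in the frame's MSR
 */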

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Restore the two sigcontexts from the frame of a transactional process.
 */

static long restore_tm_sigcontexts(struct pt_regs *regs,
                                   struct sigcontext __user *sc,
                                   struct sigcontext __user *tm_sc)
{
#ifdef CONFIG_ALTIVEC
        elf_vrreg_t __user *v_regs, *tm_v_regs;
#endif
        unsigned long err = 0;
        unsigned long msr;
#ifdef CONFIG_VSX
        int i;
#endif
        /* copy the GPRs */
        err |= __copy_from_user(regs->gpr, tm_sc->gp_regs, sizeof(regs->gpr));
        err |= __copy_from_user(&current->thread.ckpt_regs, sc->gp_regs,
                                sizeof(regs->gpr));

        /*
         * TFHAR is restored from the checkpointed 'wound-back' ucontext's NIP.
         * TEXASR was set by the signal delivery reclaim, as was TFIAR.
         * Users doing anything abhorrent like thread-switching w/ signals for
         * TM-Suspended code will have to back TEXASR/TFIAR up themselves.
         * For the case of getting a signal and simply returning from it,
         * we don't need to re-copy them here.
         */
        err |= __get_user(regs->nip, &tm_sc->gp_regs[PT_NIP]);
        err |= __get_user(current->thread.tm_tfhar, &sc->gp_regs[PT_NIP]);

        /* get MSR separately, transfer the LE bit if doing signal return */
        err |= __get_user(msr, &sc->gp_regs[PT_MSR]);
        /* Don't allow reserved mode. */
        if (MSR_TM_RESV(msr))
                return -EINVAL;

        /* pull in MSR TM from user context */
        regs->msr = (regs->msr & ~MSR_TS_MASK) | (msr & MSR_TS_MASK);

        /* pull in MSR LE from user context */
        regs->msr = (regs->msr & ~MSR_LE) | (msr & MSR_LE);

        /* The following non-GPR non-FPR non-VR state is also checkpointed: */
        err |= __get_user(regs->ctr, &tm_sc->gp_regs[PT_CTR]);
        err |= __get_user(regs->link, &tm_sc->gp_regs[PT_LNK]);
        err |= __get_user(regs->xer, &tm_sc->gp_regs[PT_XER]);
        err |= __get_user(regs->ccr, &tm_sc->gp_regs[PT_CCR]);
        err |= __get_user(current->thread.ckpt_regs.ctr,
                          &sc->gp_regs[PT_CTR]);
        err |= __get_user(current->thread.ckpt_regs.link,
                          &sc->gp_regs[PT_LNK]);
        err |= __get_user(current->thread.ckpt_regs.xer,
                          &sc->gp_regs[PT_XER]);
        err |= __get_user(current->thread.ckpt_regs.ccr,
                          &sc->gp_regs[PT_CCR]);

        /* These regs are not checkpointed; they can go in 'regs'. */
        err |= __get_user(regs->trap, &sc->gp_regs[PT_TRAP]);
        err |= __get_user(regs->dar, &sc->gp_regs[PT_DAR]);
        err |= __get_user(regs->dsisr, &sc->gp_regs[PT_DSISR]);
        err |= __get_user(regs->result, &sc->gp_regs[PT_RESULT]);

        /*
         * Force reload of FP/VEC.
         * This has to be done before copying stuff into current->thread.fpr/vr
         * for the reasons explained in the previous comment.
         */
        regs->msr &= ~(MSR_FP | MSR_FE0 | MSR_FE1 | MSR_VEC | MSR_VSX);

#ifdef CONFIG_ALTIVEC
        err |= __get_user(v_regs, &sc->v_regs);
        err |= __get_user(tm_v_regs, &tm_sc->v_regs);
        if (err)
                return err;
        if (v_regs && !access_ok(VERIFY_READ, v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        if (tm_v_regs && !access_ok(VERIFY_READ,
                                    tm_v_regs, 34 * sizeof(vector128)))
                return -EFAULT;
        /* Copy 33 vec registers (vr0..31 and vscr) from the stack */
        if (v_regs != NULL && tm_v_regs != NULL && (msr & MSR_VEC) != 0) {
                err |= __copy_from_user(&current->thread.vr_state, v_regs,
                                        33 * sizeof(vector128));
                err |= __copy_from_user(&current->thread.transact_vr, tm_v_regs,
                                        33 * sizeof(vector128));
        } else if (current->thread.used_vr) {
                memset(&current->thread.vr_state, 0, 33 * sizeof(vector128));
                memset(&current->thread.transact_vr, 0, 33 * sizeof(vector128));
        }
        /* Always get VRSAVE back */
        if (v_regs != NULL && tm_v_regs != NULL) {
                err |= __get_user(current->thread.vrsave,
                                  (u32 __user *)&v_regs[33]);
                err |= __get_user(current->thread.transact_vrsave,
                                  (u32 __user *)&tm_v_regs[33]);
        } else {
                current->thread.vrsave = 0;
                current->thread.transact_vrsave = 0;
        }
        if (cpu_has_feature(CPU_FTR_ALTIVEC))
                mtspr(SPRN_VRSAVE, current->thread.vrsave);
#endif /* CONFIG_ALTIVEC */
        /* restore floating point */
        err |= copy_fpr_from_user(current, &sc->fp_regs);
        err |= copy_transact_fpr_from_user(current, &tm_sc->fp_regs);
#ifdef CONFIG_VSX
        /*
         * Get additional VSX data. Update v_regs to point after the
         * VMX data.  Copy VSX low doubleword from userspace to local
         * buffer for formatting, then into the taskstruct.
         */
        if (v_regs && ((msr & MSR_VSX) != 0)) {
                v_regs += ELF_NVRREG;
                tm_v_regs += ELF_NVRREG;
                err |= copy_vsx_from_user(current, v_regs);
                err |= copy_transact_vsx_from_user(current, tm_v_regs);
        } else {
                for (i = 0; i < 32; i++) {
                        current->thread.fp_state.fpr[i][TS_VSRLOWOFFSET] = 0;
                        current->thread.transact_fp.fpr[i][TS_VSRLOWOFFSET] = 0;
                }
        }
#endif
        tm_enable();
        /* Make sure the transaction is marked as failed */
        current->thread.tm_texasr |= TEXASR_FS;
        /* This loads the checkpointed FP/VEC state, if used */
        tm_recheckpoint(&current->thread, msr);

        /* This loads the speculative FP/VEC state, if used */
        if (msr & MSR_FP) {
                do_load_up_transact_fpu(&current->thread);
                regs->msr |= (MSR_FP | current->thread.fpexc_mode);
        }
#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                do_load_up_transact_altivec(&current->thread);
                regs->msr |= MSR_VEC;
        }
#endif

        return err;
}
#endif

/*
 * Setup the trampoline code on the stack
 */
static long setup_trampoline(unsigned int syscall, unsigned int __user *tramp)
{
        int i;
        long err = 0;

        /* addi r1, r1, __SIGNAL_FRAMESIZE  # Pop the dummy stackframe */
        err |= __put_user(0x38210000UL | (__SIGNAL_FRAMESIZE & 0xffff), &tramp[0]);
        /* li r0, __NR_[rt_]sigreturn */
        err |= __put_user(0x38000000UL | (syscall & 0xffff), &tramp[1]);
        /* sc */
        err |= __put_user(0x44000002UL, &tramp[2]);

        /* Minimal traceback info */
        for (i = TRAMP_TRACEBACK; i < TRAMP_SIZE; i++)
                err |= __put_user(0, &tramp[i]);

        if (!err)
                flush_icache_range((unsigned long) &tramp[0],
                           (unsigned long) &tramp[TRAMP_SIZE]);

        return err;
}
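
/*
 * For illustration, the instruction sequence the code above emits (a
 * sketch of the encodings, with the immediate fields filled in at run
 * time):
 *
 *      0x38210000 | frame_size ->      addi    r1,r1,__SIGNAL_FRAMESIZE
 *      0x38000000 | syscall_nr ->      li      r0,__NR_rt_sigreturn
 *      0x44000002              ->      sc
 *
 * followed by TRAMP_SIZE - TRAMP_TRACEBACK zero words of traceback info.
 */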

/*
 * Userspace code may pass a ucontext which doesn't include VSX added
 * at the end.  We need to check for this case.
 */
#define UCONTEXTSIZEWITHOUTVSX \
                (sizeof(struct ucontext) - 32*sizeof(long))
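
/*
 * The 32*sizeof(long) above is the VSX region: one 8-byte doubleword
 * (the low half) for each of vsr0..vsr31.  A pre-VSX ucontext is exactly
 * that much smaller, so (illustratively) a ctx_size in the range
 * [UCONTEXTSIZEWITHOUTVSX, sizeof(struct ucontext)) is accepted below
 * only while the MSR in the new context has MSR_VSX clear.
 */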

/*
 * Handle {get,set,swap}_context operations
 */
int sys_swapcontext(struct ucontext __user *old_ctx,
                    struct ucontext __user *new_ctx,
                    long ctx_size, long r6, long r7, long r8, struct pt_regs *regs)
{
        unsigned char tmp;
        sigset_t set;
        unsigned long new_msr = 0;
        int ctx_has_vsx_region = 0;

        if (new_ctx &&
            get_user(new_msr, &new_ctx->uc_mcontext.gp_regs[PT_MSR]))
                return -EFAULT;
        /*
         * Check that the context is not smaller than the original
         * size (with VMX but without VSX)
         */
        if (ctx_size < UCONTEXTSIZEWITHOUTVSX)
                return -EINVAL;
        /*
         * Reject the new context if it sets the MSR VSX bits but
         * doesn't provide enough room for the VSX state.
         */
        if ((ctx_size < sizeof(struct ucontext)) &&
            (new_msr & MSR_VSX))
                return -EINVAL;
        /* Does the context have enough room to store VSX data? */
        if (ctx_size >= sizeof(struct ucontext))
                ctx_has_vsx_region = 1;

        if (old_ctx != NULL) {
                if (!access_ok(VERIFY_WRITE, old_ctx, ctx_size)
                    || setup_sigcontext(&old_ctx->uc_mcontext, regs, 0, NULL, 0,
                                        ctx_has_vsx_region)
                    || __copy_to_user(&old_ctx->uc_sigmask,
                                      &current->blocked, sizeof(sigset_t)))
                        return -EFAULT;
        }
        if (new_ctx == NULL)
                return 0;
        if (!access_ok(VERIFY_READ, new_ctx, ctx_size)
            || __get_user(tmp, (u8 __user *) new_ctx)
            || __get_user(tmp, (u8 __user *) new_ctx + ctx_size - 1))
                return -EFAULT;

        /*
         * If we get a fault copying the context into the kernel's
         * image of the user's registers, we can't just return -EFAULT
         * because the user's registers will be corrupted.  For instance
         * the NIP value may have been updated but not some of the
         * other registers.  Given that we have done the access_ok
         * and successfully read the first and last bytes of the region
         * above, this should only happen in an out-of-memory situation
         * or if another thread unmaps the region containing the context.
         * We kill the task with a SIGSEGV in this situation.
         */

        if (__copy_from_user(&set, &new_ctx->uc_sigmask, sizeof(set)))
                do_exit(SIGSEGV);
        set_current_blocked(&set);
        if (restore_sigcontext(regs, NULL, 0, &new_ctx->uc_mcontext))
                do_exit(SIGSEGV);

        /* This returns like rt_sigreturn */
        set_thread_flag(TIF_RESTOREALL);
        return 0;
}
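
/*
 * Hedged sketch of the userland view (via the usual <ucontext.h>
 * wrappers, which on ppc64 sit on top of this syscall; illustration
 * only):
 *
 *      ucontext_t old, new;
 *      ...initialize 'new' (e.g. with getcontext/makecontext)...
 *      swapcontext(&old, &new);        // old_ctx <- current state,
 *                                      // then resume from new_ctx
 *
 * Passing only old_ctx behaves like getcontext, only new_ctx like
 * setcontext; a ctx_size below UCONTEXTSIZEWITHOUTVSX gets -EINVAL.
 */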

/*
 * Do a signal return; undo the signal stack.
 */

int sys_rt_sigreturn(unsigned long r3, unsigned long r4, unsigned long r5,
                     unsigned long r6, unsigned long r7, unsigned long r8,
                     struct pt_regs *regs)
{
        struct ucontext __user *uc = (struct ucontext __user *)regs->gpr[1];
        sigset_t set;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        unsigned long msr;
#endif

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        if (!access_ok(VERIFY_READ, uc, sizeof(*uc)))
                goto badframe;

        if (__copy_from_user(&set, &uc->uc_sigmask, sizeof(set)))
                goto badframe;
        set_current_blocked(&set);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * If there is a transactional state then throw it away.
         * The purpose of a sigreturn is to destroy all traces of the
         * signal frame, and this includes any transactional state created
         * within it. We only check for suspended, as we can never be
         * transactionally active in the kernel; if we somehow were, there
         * would be nothing better to do than go ahead and Bad Thing later.
         * The cause is not important as there will never be a
         * recheckpoint so it's not user visible.
         */
        if (MSR_TM_SUSPENDED(mfmsr()))
                tm_reclaim_current(0);

        if (__get_user(msr, &uc->uc_mcontext.gp_regs[PT_MSR]))
                goto badframe;
        if (MSR_TM_ACTIVE(msr)) {
                /* We recheckpoint on return. */
                struct ucontext __user *uc_transact;
                if (__get_user(uc_transact, &uc->uc_link))
                        goto badframe;
                if (restore_tm_sigcontexts(regs, &uc->uc_mcontext,
                                           &uc_transact->uc_mcontext))
                        goto badframe;
        }
        else
        /* Fall through, for non-TM restore */
#endif
        if (restore_sigcontext(regs, NULL, 1, &uc->uc_mcontext))
                goto badframe;

        if (restore_altstack(&uc->uc_stack))
                goto badframe;

        set_thread_flag(TIF_RESTOREALL);
        return 0;

badframe:
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "rt_sigreturn",
                                   (long)uc, regs->nip, regs->link);

        force_sig(SIGSEGV, current);
        return 0;
}

int handle_rt_signal64(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;
        unsigned long newsp = 0;
        long err = 0;

        frame = get_sigframe(ksig, get_tm_stackpointer(regs), sizeof(*frame), 0);
        if (unlikely(frame == NULL))
                goto badframe;

        err |= __put_user(&frame->info, &frame->pinfo);
        err |= __put_user(&frame->uc, &frame->puc);
        err |= copy_siginfo_to_user(&frame->info, &ksig->info);
        if (err)
                goto badframe;

        /* Create the ucontext.  */
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __save_altstack(&frame->uc.uc_stack, regs->gpr[1]);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (MSR_TM_ACTIVE(regs->msr)) {
                /* The ucontext_t passed to userland points, via its uc_link
                 * pointer, to a second ucontext_t holding the transactional
                 * state.
                 */
                err |= __put_user(&frame->uc_transact, &frame->uc.uc_link);
                err |= setup_tm_sigcontexts(&frame->uc.uc_mcontext,
                                            &frame->uc_transact.uc_mcontext,
                                            regs, ksig->sig,
                                            NULL,
                                            (unsigned long)ksig->ka.sa.sa_handler);
        } else
#endif
        {
                err |= __put_user(0, &frame->uc.uc_link);
                err |= setup_sigcontext(&frame->uc.uc_mcontext, regs, ksig->sig,
                                        NULL, (unsigned long)ksig->ka.sa.sa_handler,
                                        1);
        }
        err |= __copy_to_user(&frame->uc.uc_sigmask, set, sizeof(*set));
        if (err)
                goto badframe;

        /* Make sure signal handler doesn't get spurious FP exceptions */
        current->thread.fp_state.fpscr = 0;

        /* Set up to return from userspace. */
        if (vdso64_rt_sigtramp && current->mm->context.vdso_base) {
                regs->link = current->mm->context.vdso_base + vdso64_rt_sigtramp;
        } else {
                err |= setup_trampoline(__NR_rt_sigreturn, &frame->tramp[0]);
                if (err)
                        goto badframe;
                regs->link = (unsigned long) &frame->tramp[0];
        }

        /* Allocate a dummy caller frame for the signal handler. */
        newsp = ((unsigned long)frame) - __SIGNAL_FRAMESIZE;
        err |= put_user(regs->gpr[1], (unsigned long __user *)newsp);

        /* Set up "regs" so we "return" to the signal handler. */
        if (is_elf2_task()) {
                regs->nip = (unsigned long) ksig->ka.sa.sa_handler;
                regs->gpr[12] = regs->nip;
        } else {
                /* Handler is *really* a pointer to the function descriptor for
                 * the signal routine.  The first entry in the function
                 * descriptor is the entry address of the signal handler and
                 * the second entry is the TOC value we need to use.
                 */
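                /*
                 * Illustration of that layout (as in asm/types.h for the
                 * ELFv1 ABI; reproduced here as a sketch for reference):
                 *
                 *      typedef struct {
                 *              unsigned long entry;    // handler's first insn
                 *              unsigned long toc;      // handler's TOC base
                 *              unsigned long env;      // environment pointer,
                 *                                      // unused here
                 *      } func_descr_t;
                 */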
                func_descr_t __user *funct_desc_ptr =
                        (func_descr_t __user *) ksig->ka.sa.sa_handler;

                err |= get_user(regs->nip, &funct_desc_ptr->entry);
                err |= get_user(regs->gpr[2], &funct_desc_ptr->toc);
        }

        /* enter the signal handler in native-endian mode */
        regs->msr &= ~MSR_LE;
        regs->msr |= (MSR_KERNEL & MSR_LE);
        regs->gpr[1] = newsp;
        regs->gpr[3] = ksig->sig;
        regs->result = 0;
        if (ksig->ka.sa.sa_flags & SA_SIGINFO) {
                err |= get_user(regs->gpr[4], (unsigned long __user *)&frame->pinfo);
                err |= get_user(regs->gpr[5], (unsigned long __user *)&frame->puc);
                regs->gpr[6] = (unsigned long) frame;
        } else {
                regs->gpr[4] = (unsigned long)&frame->uc.uc_mcontext;
        }
        if (err)
                goto badframe;

        return 0;

badframe:
        if (show_unhandled_signals)
                printk_ratelimited(regs->msr & MSR_64BIT ? fmt64 : fmt32,
                                   current->comm, current->pid, "setup_rt_frame",
                                   (long)frame, regs->nip, regs->link);

        return 1;
}