linux/arch/m68k/kernel/signal.c
/*
 *  linux/arch/m68k/kernel/signal.c
 *
 *  Copyright (C) 1991, 1992  Linus Torvalds
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file COPYING in the main directory of this archive
 * for more details.
 */

/*
 * Linux/m68k support by Hamish Macdonald
 *
 * 68060 fixes by Jesper Skov
 *
 * 1997-12-01  Modified for POSIX.1b signals by Andreas Schwab
 *
 * mathemu support by Roman Zippel
 *  (Note: fpstate in the signal context is completely ignored for the emulator
 *         and the internal floating point format is put on stack)
 */

/*
 * ++roman (07/09/96): implemented signal stacks (specially for tosemu on
 * Atari :-) Current limitation: Only one sigstack can be active at one time.
 * If a second signal with SA_ONSTACK set arrives while working on a sigstack,
 * SA_ONSTACK is ignored. This behaviour avoids lots of trouble with nested
 * signal handlers!
 */

#include <linux/sched.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
#include <linux/errno.h>
#include <linux/wait.h>
#include <linux/ptrace.h>
#include <linux/unistd.h>
#include <linux/stddef.h>
#include <linux/highuid.h>
#include <linux/personality.h>
#include <linux/tty.h>
#include <linux/binfmts.h>
#include <linux/extable.h>
#include <linux/tracehook.h>

#include <asm/setup.h>
#include <linux/uaccess.h>
#include <asm/pgtable.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/cacheflush.h>

#ifdef CONFIG_MMU

/*
 * Handle the slight differences in classic 68k and ColdFire trap frames.
 */
#ifdef CONFIG_COLDFIRE
#define FORMAT          4
#define FMT4SIZE        0
#else
#define FORMAT          0
#define FMT4SIZE        sizeof(((struct frame *)0)->un.fmt4)
#endif

static const int frame_size_change[16] = {
  [1]   = -1, /* sizeof(((struct frame *)0)->un.fmt1), */
  [2]   = sizeof(((struct frame *)0)->un.fmt2),
  [3]   = sizeof(((struct frame *)0)->un.fmt3),
  [4]   = FMT4SIZE,
  [5]   = -1, /* sizeof(((struct frame *)0)->un.fmt5), */
  [6]   = -1, /* sizeof(((struct frame *)0)->un.fmt6), */
  [7]   = sizeof(((struct frame *)0)->un.fmt7),
  [8]   = -1, /* sizeof(((struct frame *)0)->un.fmt8), */
  [9]   = sizeof(((struct frame *)0)->un.fmt9),
  [10]  = sizeof(((struct frame *)0)->un.fmta),
  [11]  = sizeof(((struct frame *)0)->un.fmtb),
  [12]  = -1, /* sizeof(((struct frame *)0)->un.fmtc), */
  [13]  = -1, /* sizeof(((struct frame *)0)->un.fmtd), */
  [14]  = -1, /* sizeof(((struct frame *)0)->un.fmte), */
  [15]  = -1, /* sizeof(((struct frame *)0)->un.fmtf), */
};

static inline int frame_extra_sizes(int f)
{
        return frame_size_change[f];
}

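/*
 * Called when a kernel-mode fault has an entry in the exception table for
 * the faulting PC.  The exception frame is rewritten in place as a plain
 * four-word frame (format FORMAT: 0 on classic 68k, 4 on ColdFire) whose PC
 * is the fixup address, so execution resumes in the fixup code; stkadj
 * records how many extra frame bytes are being discarded.
 */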
int fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *fixup;
        struct pt_regs *tregs;

        /* Are we prepared to handle this kernel fault? */
        fixup = search_exception_tables(regs->pc);
        if (!fixup)
                return 0;

        /* Create a new four word stack frame, discarding the old one. */
        regs->stkadj = frame_extra_sizes(regs->format);
        tregs = (struct pt_regs *)((long)regs + regs->stkadj);
        tregs->vector = regs->vector;
        tregs->format = FORMAT;
        tregs->pc = fixup->fixup;
        tregs->sr = regs->sr;

        return 1;
}

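/*
 * Make the 8 bytes of trampoline code just written at vaddr visible to the
 * instruction stream: push the affected data cache lines to RAM where the
 * cache is writeback, and invalidate the corresponding instruction cache
 * entries, so the CPU executes the new code rather than stale cache contents.
 */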
static inline void push_cache (unsigned long vaddr)
{
        /*
         * Using the old cache_push_v() was really a big waste.
         *
         * What we are trying to do is to flush 8 bytes to ram.
         * Flushing 2 cache lines of 16 bytes is much cheaper than
         * flushing 1 or 2 pages, as previously done in
         * cache_push_v().
         *                                                     Jes
         */
        if (CPU_IS_040) {
                unsigned long temp;

                __asm__ __volatile__ (".chip 68040\n\t"
                                      "nop\n\t"
                                      "ptestr (%1)\n\t"
                                      "movec %%mmusr,%0\n\t"
                                      ".chip 68k"
                                      : "=r" (temp)
                                      : "a" (vaddr));

                temp &= PAGE_MASK;
                temp |= vaddr & ~PAGE_MASK;

                __asm__ __volatile__ (".chip 68040\n\t"
                                      "nop\n\t"
                                      "cpushl %%bc,(%0)\n\t"
                                      ".chip 68k"
                                      : : "a" (temp));
        }
        else if (CPU_IS_060) {
                unsigned long temp;
                __asm__ __volatile__ (".chip 68060\n\t"
                                      "plpar (%0)\n\t"
                                      ".chip 68k"
                                      : "=a" (temp)
                                      : "0" (vaddr));
                __asm__ __volatile__ (".chip 68060\n\t"
                                      "cpushl %%bc,(%0)\n\t"
                                      ".chip 68k"
                                      : : "a" (temp));
        } else if (!CPU_IS_COLDFIRE) {
                /*
                 * 68030/68020 have no writeback cache;
                 * still need to clear icache.
                 * Note that vaddr is guaranteed to be long word aligned.
                 */
                unsigned long temp;
                asm volatile ("movec %%cacr,%0" : "=r" (temp));
                temp += 4;
                asm volatile ("movec %0,%%caar\n\t"
                              "movec %1,%%cacr"
                              : : "r" (vaddr), "r" (temp));
                asm volatile ("movec %0,%%caar\n\t"
                              "movec %1,%%cacr"
                              : : "r" (vaddr + 4), "r" (temp));
        } else {
                /* CPU_IS_COLDFIRE */
#if defined(CONFIG_CACHE_COPYBACK)
                flush_cf_dcache(0, DCACHE_MAX_ADDR);
#endif
                /* Invalidate instruction cache for the pushed bytes */
                clear_cf_icache(vaddr, vaddr + 8);
        }
}

static inline void adjustformat(struct pt_regs *regs)
{
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
}

#else /* CONFIG_MMU */

void ret_from_user_signal(void);
void ret_from_user_rt_signal(void);

static inline int frame_extra_sizes(int f)
{
        /* No frame size adjustments required on non-MMU CPUs */
        return 0;
}

static inline void adjustformat(struct pt_regs *regs)
{
        /*
         * set format byte to make stack appear modulo 4, which it will
         * be when doing the rte
         */
        regs->format = 0x4;
}

static inline void save_a5_state(struct sigcontext *sc, struct pt_regs *regs)
{
        sc->sc_a5 = ((struct switch_stack *)regs - 1)->a5;
}

static inline void push_cache(unsigned long vaddr)
{
}

#endif /* CONFIG_MMU */

/*
 * Do a signal return; undo the signal stack.
 *
 * Keep the return code on the stack quadword aligned!
 * That makes the cache flush below easier.
 */

struct sigframe
{
        char __user *pretcode;
        int sig;
        int code;
        struct sigcontext __user *psc;
        char retcode[8];
        unsigned long extramask[_NSIG_WORDS-1];
        struct sigcontext sc;
};

struct rt_sigframe
{
        char __user *pretcode;
        int sig;
        struct siginfo __user *pinfo;
        void __user *puc;
        char retcode[8];
        struct siginfo info;
        struct ucontext uc;
};
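/*
 * Both frames are built so that the handler is entered with the user stack
 * pointer at &frame->pretcode, which acts as the handler's return address:
 * with an MMU it points at the retcode trampoline inside the frame, which
 * issues the (rt_)sigreturn system call, and without an MMU it points at
 * ret_from_user_(rt_)signal.  The members following it are the handler's
 * on-stack arguments.
 */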

#define FPCONTEXT_SIZE  216
#define uc_fpstate      uc_filler[0]
#define uc_formatvec    uc_filler[FPCONTEXT_SIZE/4]
#define uc_extra        uc_filler[FPCONTEXT_SIZE/4+1]
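/*
 * The macros above overlay struct ucontext's uc_filler area: the first
 * FPCONTEXT_SIZE bytes hold the raw fsave frame, the next long holds the
 * saved format/vector word, and the remainder holds any extra exception
 * frame words copied out to user space.
 */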

#ifdef CONFIG_FPU

static unsigned char fpu_version;       /* version number of fpu, set by setup_frame */

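/*
 * sc_fpstate/fpstate hold the raw frame written by the fsave instruction.
 * A null frame (the tested byte is zero: byte 0, or byte 2 on the 68060)
 * means no FPU state was in use, so only the frestore is needed; otherwise
 * the frame format byte is sanity-checked before user-supplied register
 * contents are loaded back into the FPU.
 */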
static inline int restore_fpu_state(struct sigcontext *sc)
{
        int err = 1;

        if (FPU_IS_EMU) {
            /* restore registers */
            memcpy(current->thread.fpcntl, sc->sc_fpcntl, 12);
            memcpy(current->thread.fp, sc->sc_fpregs, 24);
            return 0;
        }

        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
            /* Verify the frame format.  */
            if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
                 (sc->sc_fpstate[0] != fpu_version))
                goto out;
            if (CPU_IS_020_OR_030) {
                if (m68k_fputype & FPU_68881 &&
                    !(sc->sc_fpstate[1] == 0x18 || sc->sc_fpstate[1] == 0xb4))
                    goto out;
                if (m68k_fputype & FPU_68882 &&
                    !(sc->sc_fpstate[1] == 0x38 || sc->sc_fpstate[1] == 0xd4))
                    goto out;
            } else if (CPU_IS_040) {
                if (!(sc->sc_fpstate[1] == 0x00 ||
                      sc->sc_fpstate[1] == 0x28 ||
                      sc->sc_fpstate[1] == 0x60))
                    goto out;
            } else if (CPU_IS_060) {
                if (!(sc->sc_fpstate[3] == 0x00 ||
                      sc->sc_fpstate[3] == 0x60 ||
                      sc->sc_fpstate[3] == 0xe0))
                    goto out;
            } else if (CPU_IS_COLDFIRE) {
                if (!(sc->sc_fpstate[0] == 0x00 ||
                      sc->sc_fpstate[0] == 0x05 ||
                      sc->sc_fpstate[0] == 0xe5))
                    goto out;
            } else
                goto out;

            if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("fmovemd %0,%%fp0-%%fp1\n\t"
                                  "fmovel %1,%%fpcr\n\t"
                                  "fmovel %2,%%fpsr\n\t"
                                  "fmovel %3,%%fpiar"
                                  : /* no outputs */
                                  : "m" (sc->sc_fpregs[0]),
                                    "m" (sc->sc_fpcntl[0]),
                                    "m" (sc->sc_fpcntl[1]),
                                    "m" (sc->sc_fpcntl[2]));
            } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fmovemx %0,%%fp0-%%fp1\n\t"
                                  "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
                                  ".chip 68k"
                                  : /* no outputs */
                                  : "m" (*sc->sc_fpregs),
                                    "m" (*sc->sc_fpcntl));
            }
        }

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("frestore %0" : : "m" (*sc->sc_fpstate));
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "frestore %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*sc->sc_fpstate));
        }
        err = 0;

out:
        return err;
}

static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
        fpregset_t fpregs;
        int err = 1;

        if (FPU_IS_EMU) {
                /* restore fpu control register */
                if (__copy_from_user(current->thread.fpcntl,
                                uc->uc_mcontext.fpregs.f_fpcntl, 12))
                        goto out;
                /* restore all other fpu registers */
                if (__copy_from_user(current->thread.fp,
                                uc->uc_mcontext.fpregs.f_fpregs, 96))
                        goto out;
                return 0;
        }

        if (__get_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate))
                goto out;
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                /* Verify the frame format.  */
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE) &&
                     (fpstate[0] != fpu_version))
                        goto out;
                if (CPU_IS_020_OR_030) {
                        if (m68k_fputype & FPU_68881 &&
                            !(context_size == 0x18 || context_size == 0xb4))
                                goto out;
                        if (m68k_fputype & FPU_68882 &&
                            !(context_size == 0x38 || context_size == 0xd4))
                                goto out;
                } else if (CPU_IS_040) {
                        if (!(context_size == 0x00 ||
                              context_size == 0x28 ||
                              context_size == 0x60))
                                goto out;
                } else if (CPU_IS_060) {
                        if (!(fpstate[3] == 0x00 ||
                              fpstate[3] == 0x60 ||
                              fpstate[3] == 0xe0))
                                goto out;
                } else if (CPU_IS_COLDFIRE) {
                        if (!(fpstate[3] == 0x00 ||
                              fpstate[3] == 0x05 ||
                              fpstate[3] == 0xe5))
                                goto out;
                } else
                        goto out;
                if (__copy_from_user(&fpregs, &uc->uc_mcontext.fpregs,
                                     sizeof(fpregs)))
                        goto out;

                if (CPU_IS_COLDFIRE) {
                        __asm__ volatile ("fmovemd %0,%%fp0-%%fp7\n\t"
                                          "fmovel %1,%%fpcr\n\t"
                                          "fmovel %2,%%fpsr\n\t"
                                          "fmovel %3,%%fpiar"
                                          : /* no outputs */
                                          : "m" (fpregs.f_fpregs[0]),
                                            "m" (fpregs.f_fpcntl[0]),
                                            "m" (fpregs.f_fpcntl[1]),
                                            "m" (fpregs.f_fpcntl[2]));
                } else {
                        __asm__ volatile (".chip 68k/68881\n\t"
                                          "fmovemx %0,%%fp0-%%fp7\n\t"
                                          "fmoveml %1,%%fpcr/%%fpsr/%%fpiar\n\t"
                                          ".chip 68k"
                                          : /* no outputs */
                                          : "m" (*fpregs.f_fpregs),
                                            "m" (*fpregs.f_fpcntl));
                }
        }
        if (context_size &&
            __copy_from_user(fpstate + 4, (long __user *)&uc->uc_fpstate + 1,
                             context_size))
                goto out;

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("frestore %0" : : "m" (*fpstate));
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "frestore %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*fpstate));
        }
        err = 0;

out:
        return err;
}

/*
 * Set up a signal frame.
 */
static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
        if (FPU_IS_EMU) {
                /* save registers */
                memcpy(sc->sc_fpcntl, current->thread.fpcntl, 12);
                memcpy(sc->sc_fpregs, current->thread.fp, 24);
                return;
        }

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("fsave %0"
                                  : : "m" (*sc->sc_fpstate) : "memory");
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fsave %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*sc->sc_fpstate) : "memory");
        }

        if (CPU_IS_060 ? sc->sc_fpstate[2] : sc->sc_fpstate[0]) {
                fpu_version = sc->sc_fpstate[0];
                if (CPU_IS_020_OR_030 &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
                        if (*(unsigned short *) sc->sc_fpstate == 0x1f38)
                                sc->sc_fpstate[0x38] |= 1 << 3;
                }

                if (CPU_IS_COLDFIRE) {
                        __asm__ volatile ("fmovemd %%fp0-%%fp1,%0\n\t"
                                          "fmovel %%fpcr,%1\n\t"
                                          "fmovel %%fpsr,%2\n\t"
                                          "fmovel %%fpiar,%3"
                                          : "=m" (sc->sc_fpregs[0]),
                                            "=m" (sc->sc_fpcntl[0]),
                                            "=m" (sc->sc_fpcntl[1]),
                                            "=m" (sc->sc_fpcntl[2])
                                          : /* no inputs */
                                          : "memory");
                } else {
                        __asm__ volatile (".chip 68k/68881\n\t"
                                          "fmovemx %%fp0-%%fp1,%0\n\t"
                                          "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
                                          ".chip 68k"
                                          : "=m" (*sc->sc_fpregs),
                                            "=m" (*sc->sc_fpcntl)
                                          : /* no inputs */
                                          : "memory");
                }
        }
}

static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
        unsigned char fpstate[FPCONTEXT_SIZE];
        int context_size = CPU_IS_060 ? 8 : (CPU_IS_COLDFIRE ? 12 : 0);
        int err = 0;

        if (FPU_IS_EMU) {
                /* save fpu control register */
                err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpcntl,
                                current->thread.fpcntl, 12);
                /* save all other fpu registers */
                err |= copy_to_user(uc->uc_mcontext.fpregs.f_fpregs,
                                current->thread.fp, 96);
                return err;
        }

        if (CPU_IS_COLDFIRE) {
                __asm__ volatile ("fsave %0" : : "m" (*fpstate) : "memory");
        } else {
                __asm__ volatile (".chip 68k/68881\n\t"
                                  "fsave %0\n\t"
                                  ".chip 68k"
                                  : : "m" (*fpstate) : "memory");
        }

        err |= __put_user(*(long *)fpstate, (long __user *)&uc->uc_fpstate);
        if (CPU_IS_060 ? fpstate[2] : fpstate[0]) {
                fpregset_t fpregs;
                if (!(CPU_IS_060 || CPU_IS_COLDFIRE))
                        context_size = fpstate[1];
                fpu_version = fpstate[0];
                if (CPU_IS_020_OR_030 &&
                    regs->vector >= (VEC_FPBRUC * 4) &&
                    regs->vector <= (VEC_FPNAN * 4)) {
                        /* Clear pending exception in 68882 idle frame */
                        if (*(unsigned short *) fpstate == 0x1f38)
                                fpstate[0x38] |= 1 << 3;
                }
                if (CPU_IS_COLDFIRE) {
                        __asm__ volatile ("fmovemd %%fp0-%%fp7,%0\n\t"
                                          "fmovel %%fpcr,%1\n\t"
                                          "fmovel %%fpsr,%2\n\t"
                                          "fmovel %%fpiar,%3"
                                          : "=m" (fpregs.f_fpregs[0]),
                                            "=m" (fpregs.f_fpcntl[0]),
                                            "=m" (fpregs.f_fpcntl[1]),
                                            "=m" (fpregs.f_fpcntl[2])
                                          : /* no inputs */
                                          : "memory");
                } else {
                        __asm__ volatile (".chip 68k/68881\n\t"
                                          "fmovemx %%fp0-%%fp7,%0\n\t"
                                          "fmoveml %%fpcr/%%fpsr/%%fpiar,%1\n\t"
                                          ".chip 68k"
                                          : "=m" (*fpregs.f_fpregs),
                                            "=m" (*fpregs.f_fpcntl)
                                          : /* no inputs */
                                          : "memory");
                }
                err |= copy_to_user(&uc->uc_mcontext.fpregs, &fpregs,
                                    sizeof(fpregs));
        }
        if (context_size)
                err |= copy_to_user((long __user *)&uc->uc_fpstate + 1, fpstate + 4,
                                    context_size);
        return err;
}

#else /* CONFIG_FPU */

/*
 * For the case with no FPU configured these all do nothing.
 */
static inline int restore_fpu_state(struct sigcontext *sc)
{
        return 0;
}

static inline int rt_restore_fpu_state(struct ucontext __user *uc)
{
        return 0;
}

static inline void save_fpu_state(struct sigcontext *sc, struct pt_regs *regs)
{
}

static inline int rt_save_fpu_state(struct ucontext __user *uc, struct pt_regs *regs)
{
        return 0;
}

#endif /* CONFIG_FPU */

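/*
 * On return from a handler, the saved format/vector word may describe an
 * exception frame with more words than the basic four-word frame currently
 * on the kernel stack.  mangle_kernel_stack() restores the original frame:
 * for a larger frame it copies the extra words back in from user space,
 * rebuilds switch_stack/pt_regs lower down on the kernel stack, and branches
 * straight to ret_from_signal because the stack has moved underneath the
 * caller.  A return of 1 means the user supplied a bad frame format.
 */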
static int mangle_kernel_stack(struct pt_regs *regs, int formatvec,
                               void __user *fp)
{
        int fsize = frame_extra_sizes(formatvec >> 12);
        if (fsize < 0) {
                /*
                 * user process trying to return with weird frame format
                 */
                pr_debug("user process returning with weird frame format\n");
                return 1;
        }
        if (!fsize) {
                regs->format = formatvec >> 12;
                regs->vector = formatvec & 0xfff;
        } else {
                struct switch_stack *sw = (struct switch_stack *)regs - 1;
                unsigned long buf[fsize / 2]; /* yes, twice as much */

                /* that'll make sure that expansion won't crap over data */
                if (copy_from_user(buf + fsize / 4, fp, fsize))
                        return 1;

                /* point of no return */
                regs->format = formatvec >> 12;
                regs->vector = formatvec & 0xfff;
#define frame_offset (sizeof(struct pt_regs)+sizeof(struct switch_stack))
                __asm__ __volatile__ (
#ifdef CONFIG_COLDFIRE
                         "   movel %0,%/sp\n\t"
                         "   bra ret_from_signal\n"
#else
                         "   movel %0,%/a0\n\t"
                         "   subl %1,%/a0\n\t"     /* make room on stack */
                         "   movel %/a0,%/sp\n\t"  /* set stack pointer */
                         /* move switch_stack and pt_regs */
                         "1: movel %0@+,%/a0@+\n\t"
                         "   dbra %2,1b\n\t"
                         "   lea %/sp@(%c3),%/a0\n\t" /* add offset of fmt */
                         "   lsrl  #2,%1\n\t"
                         "   subql #1,%1\n\t"
                         /* copy to the gap we'd made */
                         "2: movel %4@+,%/a0@+\n\t"
                         "   dbra %1,2b\n\t"
                         "   bral ret_from_signal\n"
#endif
                         : /* no outputs, it doesn't ever return */
                         : "a" (sw), "d" (fsize), "d" (frame_offset/4-1),
                           "n" (frame_offset), "a" (buf + fsize/4)
                         : "a0");
#undef frame_offset
        }
        return 0;
}

static inline int
restore_sigcontext(struct pt_regs *regs, struct sigcontext __user *usc, void __user *fp)
{
        int formatvec;
        struct sigcontext context;
        int err = 0;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /* get previous context */
        if (copy_from_user(&context, usc, sizeof(context)))
                goto badframe;

        /* restore passed registers */
        regs->d0 = context.sc_d0;
        regs->d1 = context.sc_d1;
        regs->a0 = context.sc_a0;
        regs->a1 = context.sc_a1;
        regs->sr = (regs->sr & 0xff00) | (context.sc_sr & 0xff);
        regs->pc = context.sc_pc;
        regs->orig_d0 = -1;             /* disable syscall checks */
        wrusp(context.sc_usp);
        formatvec = context.sc_formatvec;

        err = restore_fpu_state(&context);

        if (err || mangle_kernel_stack(regs, formatvec, fp))
                goto badframe;

        return 0;

badframe:
        return 1;
}

static inline int
rt_restore_ucontext(struct pt_regs *regs, struct switch_stack *sw,
                    struct ucontext __user *uc)
{
        int temp;
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        unsigned long usp;
        int err;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        err = __get_user(temp, &uc->uc_mcontext.version);
        if (temp != MCONTEXT_VERSION)
                goto badframe;
        /* restore passed registers */
        err |= __get_user(regs->d0, &gregs[0]);
        err |= __get_user(regs->d1, &gregs[1]);
        err |= __get_user(regs->d2, &gregs[2]);
        err |= __get_user(regs->d3, &gregs[3]);
        err |= __get_user(regs->d4, &gregs[4]);
        err |= __get_user(regs->d5, &gregs[5]);
        err |= __get_user(sw->d6, &gregs[6]);
        err |= __get_user(sw->d7, &gregs[7]);
        err |= __get_user(regs->a0, &gregs[8]);
        err |= __get_user(regs->a1, &gregs[9]);
        err |= __get_user(regs->a2, &gregs[10]);
        err |= __get_user(sw->a3, &gregs[11]);
        err |= __get_user(sw->a4, &gregs[12]);
        err |= __get_user(sw->a5, &gregs[13]);
        err |= __get_user(sw->a6, &gregs[14]);
        err |= __get_user(usp, &gregs[15]);
        wrusp(usp);
        err |= __get_user(regs->pc, &gregs[16]);
        err |= __get_user(temp, &gregs[17]);
        regs->sr = (regs->sr & 0xff00) | (temp & 0xff);
        regs->orig_d0 = -1;             /* disable syscall checks */
        err |= __get_user(temp, &uc->uc_formatvec);

        err |= rt_restore_fpu_state(uc);
        err |= restore_altstack(&uc->uc_stack);

        if (err)
                goto badframe;

        if (mangle_kernel_stack(regs, temp, &uc->uc_extra))
                goto badframe;

        return 0;

badframe:
        return 1;
}

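/*
 * sigreturn is reached through pretcode after the handler has returned,
 * which pops pretcode off the user stack and leaves the user stack pointer
 * 4 bytes into the signal frame; hence the "usp - 4" below.
 */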
asmlinkage int do_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
        unsigned long usp = rdusp();
        struct sigframe __user *frame = (struct sigframe __user *)(usp - 4);
        sigset_t set;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__get_user(set.sig[0], &frame->sc.sc_mask) ||
            (_NSIG_WORDS > 1 &&
             __copy_from_user(&set.sig[1], &frame->extramask,
                              sizeof(frame->extramask))))
                goto badframe;

        set_current_blocked(&set);

        if (restore_sigcontext(regs, &frame->sc, frame + 1))
                goto badframe;
        return regs->d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

asmlinkage int do_rt_sigreturn(struct pt_regs *regs, struct switch_stack *sw)
{
        unsigned long usp = rdusp();
        struct rt_sigframe __user *frame = (struct rt_sigframe __user *)(usp - 4);
        sigset_t set;

        if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
                goto badframe;
        if (__copy_from_user(&set, &frame->uc.uc_sigmask, sizeof(set)))
                goto badframe;

        set_current_blocked(&set);

        if (rt_restore_ucontext(regs, sw, &frame->uc))
                goto badframe;
        return regs->d0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

static void setup_sigcontext(struct sigcontext *sc, struct pt_regs *regs,
                             unsigned long mask)
{
        sc->sc_mask = mask;
        sc->sc_usp = rdusp();
        sc->sc_d0 = regs->d0;
        sc->sc_d1 = regs->d1;
        sc->sc_a0 = regs->a0;
        sc->sc_a1 = regs->a1;
        sc->sc_sr = regs->sr;
        sc->sc_pc = regs->pc;
        sc->sc_formatvec = regs->format << 12 | regs->vector;
        save_a5_state(sc, regs);
        save_fpu_state(sc, regs);
}

static inline int rt_setup_ucontext(struct ucontext __user *uc, struct pt_regs *regs)
{
        struct switch_stack *sw = (struct switch_stack *)regs - 1;
        greg_t __user *gregs = uc->uc_mcontext.gregs;
        int err = 0;

        err |= __put_user(MCONTEXT_VERSION, &uc->uc_mcontext.version);
        err |= __put_user(regs->d0, &gregs[0]);
        err |= __put_user(regs->d1, &gregs[1]);
        err |= __put_user(regs->d2, &gregs[2]);
        err |= __put_user(regs->d3, &gregs[3]);
        err |= __put_user(regs->d4, &gregs[4]);
        err |= __put_user(regs->d5, &gregs[5]);
        err |= __put_user(sw->d6, &gregs[6]);
        err |= __put_user(sw->d7, &gregs[7]);
        err |= __put_user(regs->a0, &gregs[8]);
        err |= __put_user(regs->a1, &gregs[9]);
        err |= __put_user(regs->a2, &gregs[10]);
        err |= __put_user(sw->a3, &gregs[11]);
        err |= __put_user(sw->a4, &gregs[12]);
        err |= __put_user(sw->a5, &gregs[13]);
        err |= __put_user(sw->a6, &gregs[14]);
        err |= __put_user(rdusp(), &gregs[15]);
        err |= __put_user(regs->pc, &gregs[16]);
        err |= __put_user(regs->sr, &gregs[17]);
        err |= __put_user((regs->format << 12) | regs->vector, &uc->uc_formatvec);
        err |= rt_save_fpu_state(uc, regs);
        return err;
}

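/*
 * Pick the stack for the signal frame: sigsp() returns the top of the
 * alternate signal stack if SA_ONSTACK applies and we are not already on
 * it, otherwise the current user stack pointer.  The frame is then pushed
 * and rounded down to an 8-byte boundary, keeping the retcode trampoline
 * quadword aligned for the cache flush done in push_cache().
 */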
static inline void __user *
get_sigframe(struct ksignal *ksig, size_t frame_size)
{
        unsigned long usp = sigsp(rdusp(), ksig);

        return (void __user *)((usp - frame_size) & -8UL);
}

static int setup_frame(struct ksignal *ksig, sigset_t *set,
                        struct pt_regs *regs)
{
        struct sigframe __user *frame;
        int fsize = frame_extra_sizes(regs->format);
        struct sigcontext context;
        int err = 0, sig = ksig->sig;

        if (fsize < 0) {
                pr_debug("setup_frame: Unknown frame format %#x\n",
                         regs->format);
                return -EFAULT;
        }

        frame = get_sigframe(ksig, sizeof(*frame) + fsize);

        if (fsize)
                err |= copy_to_user (frame + 1, regs + 1, fsize);

        err |= __put_user(sig, &frame->sig);

        err |= __put_user(regs->vector, &frame->code);
        err |= __put_user(&frame->sc, &frame->psc);

        if (_NSIG_WORDS > 1)
                err |= copy_to_user(frame->extramask, &set->sig[1],
                                    sizeof(frame->extramask));

        setup_sigcontext(&context, regs, set->sig[0]);
        err |= copy_to_user (&frame->sc, &context, sizeof(context));

        /* Set up to return from userspace.  */
#ifdef CONFIG_MMU
        err |= __put_user(frame->retcode, &frame->pretcode);
        /* moveq #,d0; trap #0 */
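        /*
         * The 32-bit value below encodes, big-endian, the two instruction
         * words 0x70xx (moveq #__NR_sigreturn,%d0) and 0x4e40 (trap #0):
         * the sigreturn trampoline executed from the user stack.
         */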
        err |= __put_user(0x70004e40 + (__NR_sigreturn << 16),
                          (long __user *)(frame->retcode));
#else
        err |= __put_user((void *) ret_from_user_signal, &frame->pretcode);
#endif

        if (err)
                return -EFAULT;

        push_cache ((unsigned long) &frame->retcode);

        /*
         * Set up registers for signal handler.  All the state we are about
         * to destroy is successfully copied to sigframe.
         */
        wrusp ((unsigned long) frame);
        regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
        adjustformat(regs);

        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
        if (fsize)
                regs->stkadj = fsize;

        /* Prepare to skip over the extra stuff in the exception frame.  */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
                pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
                /* This must be copied with decreasing addresses to
                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return 0;
}

static int setup_rt_frame(struct ksignal *ksig, sigset_t *set,
                           struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;
        int fsize = frame_extra_sizes(regs->format);
        int err = 0, sig = ksig->sig;

        if (fsize < 0) {
                pr_debug("setup_rt_frame: Unknown frame format %#x\n",
                         regs->format);
                return -EFAULT;
        }

        frame = get_sigframe(ksig, sizeof(*frame));

        if (fsize)
                err |= copy_to_user (&frame->uc.uc_extra, regs + 1, fsize);

        err |= __put_user(sig, &frame->sig);
        err |= __put_user(&frame->info, &frame->pinfo);
        err |= __put_user(&frame->uc, &frame->puc);
        err |= copy_siginfo_to_user(&frame->info, &ksig->info);

        /* Create the ucontext.  */
        err |= __put_user(0, &frame->uc.uc_flags);
        err |= __put_user(NULL, &frame->uc.uc_link);
        err |= __save_altstack(&frame->uc.uc_stack, rdusp());
        err |= rt_setup_ucontext(&frame->uc, regs);
        err |= copy_to_user (&frame->uc.uc_sigmask, set, sizeof(*set));

        /* Set up to return from userspace.  */
#ifdef CONFIG_MMU
        err |= __put_user(frame->retcode, &frame->pretcode);
#ifdef __mcoldfire__
        /* movel #__NR_rt_sigreturn,d0; trap #0 */
        err |= __put_user(0x203c0000, (long __user *)(frame->retcode + 0));
        err |= __put_user(0x00004e40 + (__NR_rt_sigreturn << 16),
                          (long __user *)(frame->retcode + 4));
#else
        /* moveq #,d0; notb d0; trap #0 */
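        /*
         * __NR_rt_sigreturn does not fit in moveq's signed 8-bit immediate,
         * so the trampoline loads its one's complement (small enough for
         * moveq, which also leaves the upper bits of %d0 clear) and flips
         * the low byte back with not.b (0x4600) before the trap #0 (0x4e40).
         */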
        err |= __put_user(0x70004600 + ((__NR_rt_sigreturn ^ 0xff) << 16),
                          (long __user *)(frame->retcode + 0));
        err |= __put_user(0x4e40, (short __user *)(frame->retcode + 4));
#endif
#else
        err |= __put_user((void *) ret_from_user_rt_signal, &frame->pretcode);
#endif /* CONFIG_MMU */

        if (err)
                return -EFAULT;

        push_cache ((unsigned long) &frame->retcode);

        /*
         * Set up registers for signal handler.  All the state we are about
         * to destroy is successfully copied to sigframe.
         */
        wrusp ((unsigned long) frame);
        regs->pc = (unsigned long) ksig->ka.sa.sa_handler;
        adjustformat(regs);

        /*
         * This is subtle; if we build more than one sigframe, all but the
         * first one will see frame format 0 and have fsize == 0, so we won't
         * screw stkadj.
         */
        if (fsize)
                regs->stkadj = fsize;

        /* Prepare to skip over the extra stuff in the exception frame.  */
        if (regs->stkadj) {
                struct pt_regs *tregs =
                        (struct pt_regs *)((ulong)regs + regs->stkadj);
                pr_debug("Performing stackadjust=%04lx\n", regs->stkadj);
                /* This must be copied with decreasing addresses to
                   handle overlaps.  */
                tregs->vector = 0;
                tregs->format = 0;
                tregs->pc = regs->pc;
                tregs->sr = regs->sr;
        }
        return 0;
}

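/*
 * System call restarting: orig_d0 still holds the syscall number, and the
 * trap instruction that entered the kernel is two bytes long, so reloading
 * d0 and backing the PC up by 2 re-executes the call.  With a handler
 * present, -ERESTARTNOHAND and -ERESTART_RESTARTBLOCK become -EINTR, and
 * -ERESTARTSYS only restarts if the handler was installed with SA_RESTART.
 */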
static inline void
handle_restart(struct pt_regs *regs, struct k_sigaction *ka, int has_handler)
{
        switch (regs->d0) {
        case -ERESTARTNOHAND:
                if (!has_handler)
                        goto do_restart;
                regs->d0 = -EINTR;
                break;

        case -ERESTART_RESTARTBLOCK:
                if (!has_handler) {
                        regs->d0 = __NR_restart_syscall;
                        regs->pc -= 2;
                        break;
                }
                regs->d0 = -EINTR;
                break;

        case -ERESTARTSYS:
                if (has_handler && !(ka->sa.sa_flags & SA_RESTART)) {
                        regs->d0 = -EINTR;
                        break;
                }
        /* fallthrough */
        case -ERESTARTNOINTR:
        do_restart:
                regs->d0 = regs->orig_d0;
                regs->pc -= 2;
                break;
        }
}

/*
 * OK, we're invoking a handler
 */
static void
handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int err;
        /* are we from a system call? */
        if (regs->orig_d0 >= 0)
                /* If so, check system call restarting.. */
                handle_restart(regs, &ksig->ka, 1);

        /* set up the stack frame */
        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                err = setup_rt_frame(ksig, oldset, regs);
        else
                err = setup_frame(ksig, oldset, regs);

        signal_setup_done(err, ksig, 0);

        if (test_thread_flag(TIF_DELAYED_TRACE)) {
                regs->sr &= ~0x8000;
                send_sig(SIGTRAP, current, 1);
        }
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 */
static void do_signal(struct pt_regs *regs)
{
        struct ksignal ksig;

        current->thread.esp0 = (unsigned long) regs;

        if (get_signal(&ksig)) {
                /* Whee!  Actually deliver the signal.  */
                handle_signal(&ksig, regs);
                return;
        }

        /* Did we come from a system call? */
        if (regs->orig_d0 >= 0)
                /* Restart the system call - no handlers present */
                handle_restart(regs, NULL, 0);

        /* If there's no signal to deliver, we just restore the saved mask.  */
        restore_saved_sigmask();
}

void do_notify_resume(struct pt_regs *regs)
{
        if (test_thread_flag(TIF_SIGPENDING))
                do_signal(regs);

        if (test_and_clear_thread_flag(TIF_NOTIFY_RESUME))
                tracehook_notify_resume(regs);
}