/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>
#include <linux/uprobes.h>
#include <linux/syscalls.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

extern const unsigned long sigreturn_codes[7];

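/*
 * Offset of the sigreturn trampolines within the signal page; set when
 * the page is allocated in get_signal_page() below.
 */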
static unsigned long signal_return_offset;

#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct crunch_sigframe *kframe;

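        /*
         * kbuf is over-allocated by 8 bytes so that a 64-bit aligned
         * kframe can always be carved out of it on the kernel stack;
         * the other coprocessor helpers below use the same trick.
         */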
        /* the crunch context must be 64 bit aligned */
        kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        kframe->magic = CRUNCH_MAGIC;
        kframe->size = CRUNCH_STORAGE_SIZE;
        crunch_task_copy(current_thread_info(), &kframe->storage);
        return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(char __user **auxp)
{
        struct crunch_sigframe __user *frame =
                (struct crunch_sigframe __user *)*auxp;
        char kbuf[sizeof(*frame) + 8];
        struct crunch_sigframe *kframe;

        /* the crunch context must be 64 bit aligned */
        kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;
        if (kframe->magic != CRUNCH_MAGIC ||
            kframe->size != CRUNCH_STORAGE_SIZE)
                return -1;
        *auxp += CRUNCH_STORAGE_SIZE;
        crunch_task_restore(current_thread_info(), &kframe->storage);
        return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;
        int err = 0;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                kframe->magic = IWMMXT_MAGIC;
                kframe->size = IWMMXT_STORAGE_SIZE;
                iwmmxt_task_copy(current_thread_info(), &kframe->storage);

                err = __copy_to_user(frame, kframe, sizeof(*frame));
        } else {
                /*
                 * For bug-compatibility with older kernels, some space
                 * has to be reserved for iWMMXt even if it's not used.
                 * Set the magic and size appropriately so that properly
                 * written userspace can skip it reliably:
                 */
                __put_user_error(DUMMY_MAGIC, &frame->magic, err);
                __put_user_error(IWMMXT_STORAGE_SIZE, &frame->size, err);
        }

        return err;
}

static int restore_iwmmxt_context(char __user **auxp)
{
        struct iwmmxt_sigframe __user *frame =
                (struct iwmmxt_sigframe __user *)*auxp;
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;

        /*
         * For non-iWMMXt threads: a single iwmmxt_sigframe-sized dummy
         * block is discarded for compatibility with setup_sigframe() if
         * present, but we don't mandate its presence.  If some other
         * magic is here, it's not for us:
         */
        if (!test_thread_flag(TIF_USING_IWMMXT) &&
            kframe->magic != DUMMY_MAGIC)
                return 0;

        if (kframe->size != IWMMXT_STORAGE_SIZE)
                return -1;

        if (test_thread_flag(TIF_USING_IWMMXT)) {
                if (kframe->magic != IWMMXT_MAGIC)
                        return -1;

                iwmmxt_task_restore(current_thread_info(), &kframe->storage);
        }

        *auxp += IWMMXT_STORAGE_SIZE;
        return 0;
}

#endif

#ifdef CONFIG_VFP

static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
        const unsigned long magic = VFP_MAGIC;
        const unsigned long size = VFP_STORAGE_SIZE;
        int err = 0;

        __put_user_error(magic, &frame->magic, err);
        __put_user_error(size, &frame->size, err);

        if (err)
                return -EFAULT;

        return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(char __user **auxp)
{
        struct vfp_sigframe __user *frame =
                (struct vfp_sigframe __user *)*auxp;
        unsigned long magic;
        unsigned long size;
        int err = 0;

        __get_user_error(magic, &frame->magic, err);
        __get_user_error(size, &frame->size, err);

        if (err)
                return -EFAULT;
        if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
                return -EINVAL;

        *auxp += size;
        return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
        struct ucontext uc;
        unsigned long retcode[2];
};

struct rt_sigframe {
        struct siginfo info;
        struct sigframe sig;
};

static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
        char __user *aux;
        sigset_t set;
        int err;

        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0)
                set_current_blocked(&set);

        __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
        __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
        __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
        __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
        __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
        __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
        __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
        __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
        __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
        __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
        __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
        __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
        __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
        __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
        __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

        err |= !valid_user_regs(regs);

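        /*
         * uc_regspace holds a sequence of coprocessor state blocks, each
         * tagged with a magic number and a size; aux is advanced past
         * each block as it is consumed.
         */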
        aux = (char __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
        if (err == 0)
                err |= restore_crunch_context(&aux);
#endif
#ifdef CONFIG_IWMMXT
        if (err == 0)
                err |= restore_iwmmxt_context(&aux);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= restore_vfp_context(&aux);
#endif

        return err;
}

asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
        struct sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be 64-bit aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct sigframe __user *)regs->ARM_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, frame))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be 64-bit aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->ARM_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, &frame->sig))
                goto badframe;

        if (restore_altstack(&frame->sig.uc.uc_stack))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
        struct aux_sigframe __user *aux;
        int err = 0;

        __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
        __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
        __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
        __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
        __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
        __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
        __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
        __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
        __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
        __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
        __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
        __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
        __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
        __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
        __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

        __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
        __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
        __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
        __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

        aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
        if (err == 0)
                err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
        if (err == 0)
                err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= preserve_vfp_context(&aux->vfp);
#endif
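        /* A zero magic marks the end of the coprocessor save area. */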
        __put_user_error(0, &aux->end_magic, err);

        return err;
}

static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
        unsigned long sp = sigsp(regs->ARM_sp, ksig);
        void __user *frame;

        /*
         * ATPCS B01 mandates 8-byte alignment
         */
        frame = (void __user *)((sp - framesize) & ~7);

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(VERIFY_WRITE, frame, framesize))
                frame = NULL;

        return frame;
}

static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
             unsigned long __user *rc, void __user *frame)
{
        unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
        unsigned long retcode;
        int thumb = 0;
        unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

        cpsr |= PSR_ENDSTATE;

        /*
         * Maybe we need to deliver a 32-bit signal to a 26-bit task.
         */
        if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
                cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
        if (elf_hwcap & HWCAP_THUMB) {
                /*
                 * The LSB of the handler determines if we're going to
                 * be using THUMB or ARM mode for this signal handler.
                 */
                thumb = handler & 1;

                /*
                 * Clear the If-Then Thumb-2 execution state.  ARM spec
                 * requires this to be all 000s in ARM mode.  Snapdragon
                 * S4/Krait misbehaves on a Thumb=>ARM signal transition
                 * without this.
                 *
                 * We must do this whenever we are running on a Thumb-2
                 * capable CPU, which includes ARMv6T2.  However, we elect
                 * to always do this to simplify the code; this field is
                 * marked UNK/SBZP for older architectures.
                 */
                cpsr &= ~PSR_IT_MASK;

                if (thumb)
                        cpsr |= PSR_T_BIT;
                else
                        cpsr &= ~PSR_T_BIT;
        }
#endif

        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
                retcode = (unsigned long)ksig->ka.sa.sa_restorer;
        } else {
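                /*
                 * Index into sigreturn_codes[]: the table holds ARM and
                 * Thumb trampolines for both sigreturn and rt_sigreturn;
                 * two words are always copied onto the stack below.
                 */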
                unsigned int idx = thumb << 1;

                if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                        idx += 3;

                /*
                 * Put the sigreturn code on the stack no matter which return
                 * mechanism we use in order to remain ABI compliant
                 */
                if (__put_user(sigreturn_codes[idx],   rc) ||
                    __put_user(sigreturn_codes[idx+1], rc+1))
                        return 1;

#ifdef CONFIG_MMU
                if (cpsr & MODE32_BIT) {
                        struct mm_struct *mm = current->mm;

                        /*
                         * 32-bit code can use the signal return page
                         * except when the MPU has protected the vectors
                         * page from PL0
                         */
                        retcode = mm->context.sigpage + signal_return_offset +
                                  (idx << 2) + thumb;
                } else
#endif
                {
                        /*
                         * Ensure that the instruction cache sees
                         * the return code written onto the stack.
                         */
                        flush_icache_range((unsigned long)rc,
                                           (unsigned long)(rc + 2));

                        retcode = ((unsigned long)rc) + thumb;
                }
        }

        regs->ARM_r0 = ksig->sig;
        regs->ARM_sp = (unsigned long)frame;
        regs->ARM_lr = retcode;
        regs->ARM_pc = handler;
        regs->ARM_cpsr = cpsr;

        return 0;
}

static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        /*
         * Set uc.uc_flags to a value which sc.trap_no would never have.
         */
        __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

        err |= setup_sigframe(frame, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->retcode, frame);

        return err;
}

static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        err |= copy_siginfo_to_user(&frame->info, &ksig->info);

        __put_user_error(0, &frame->sig.uc.uc_flags, err);
        __put_user_error(NULL, &frame->sig.uc.uc_link, err);

        err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
        err |= setup_sigframe(&frame->sig, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->sig.retcode, frame);

        if (err == 0) {
                /*
                 * For realtime signals we must also set the second and third
                 * arguments for the signal handler.
                 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
                 */
                regs->ARM_r1 = (unsigned long)&frame->info;
                regs->ARM_r2 = (unsigned long)&frame->sig.uc;
        }

        return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int ret;

        /*
         * Set up the stack frame
         */
        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                ret = setup_rt_frame(ksig, oldset, regs);
        else
                ret = setup_frame(ksig, oldset, regs);

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(regs);

        signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
        unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
        struct ksignal ksig;
        int restart = 0;

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                continue_addr = regs->ARM_pc;
                restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
                retval = regs->ARM_r0;

                /*
                 * Prepare for system call restart.  We do this here so that a
                 * debugger will see the already changed PC.
                 */
                switch (retval) {
                case -ERESTART_RESTARTBLOCK:
                        restart -= 2;
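                        /*
                         * Fall through: a restart-block restart leaves
                         * restart at -1, while the plain restart cases
                         * below leave it at +1, so callers can tell the
                         * two apart.
                         */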
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        restart++;
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
                }
        }

        /*
         * Get the signal to deliver.  When running under ptrace, at this
         * point the debugger may change all our registers ...
         *
         * Depending on the signal settings we may need to revert the
         * decision to restart the system call.  But skip this if a
         * debugger has chosen to restart at a different PC.
         */
        if (get_signal(&ksig)) {
                /* handler */
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        if (retval == -ERESTARTNOHAND ||
                            retval == -ERESTART_RESTARTBLOCK ||
                            (retval == -ERESTARTSYS &&
                             !(ksig.ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
                }
                handle_signal(&ksig, regs);
        } else {
                /* no handler */
                restore_saved_sigmask();
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        regs->ARM_pc = continue_addr;
                        return restart;
                }
        }
        return 0;
}

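/*
 * Called on the work-pending return path with IRQs disabled.  A non-zero
 * return value asks the assembly caller to restart the interrupted system
 * call without going back out to user space; a negative value requests the
 * restart via sys_restart_syscall.
 */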
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
        /*
         * The assembly code enters us with IRQs off, but it hasn't
         * informed the tracing code of that for efficiency reasons.
         * Update the trace code with the current status.
         */
        trace_hardirqs_off();
        do {
                if (likely(thread_flags & _TIF_NEED_RESCHED)) {
                        schedule();
                } else {
                        if (unlikely(!user_mode(regs)))
                                return 0;
                        local_irq_enable();
                        if (thread_flags & _TIF_SIGPENDING) {
                                int restart = do_signal(regs, syscall);
                                if (unlikely(restart)) {
                                        /*
                                         * Restart without handlers.
                                         * Deal with it without leaving
                                         * the kernel space.
                                         */
                                        return restart;
                                }
                                syscall = 0;
                        } else if (thread_flags & _TIF_UPROBE) {
                                uprobe_notify_resume(regs);
                        } else {
                                clear_thread_flag(TIF_NOTIFY_RESUME);
                                tracehook_notify_resume(regs);
                        }
                }
                local_irq_disable();
                thread_flags = current_thread_info()->flags;
        } while (thread_flags & _TIF_WORK_MASK);
        return 0;
}

struct page *get_signal_page(void)
{
        unsigned long ptr;
        unsigned offset;
        struct page *page;
        void *addr;

        page = alloc_pages(GFP_KERNEL, 0);

        if (!page)
                return NULL;

        addr = page_address(page);

        /* Give the signal return code some randomness */
        offset = 0x200 + (get_random_int() & 0x7fc);
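        /*
         * The mask keeps the offset word aligned, and the 0x200 base plus
         * the 0x7fc maximum still leaves room for the trampolines before
         * the end of the page.
         */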
        signal_return_offset = offset;

        /*
         * Copy the signal return trampolines into the signal page;
         * setup_return() points the handler's return address at them
         * when the handler does not supply its own restorer.
         */
        memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

        ptr = (unsigned long)addr + offset;
        flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

        return page;
}

/* Defer to generic check */
asmlinkage void addr_limit_check_failed(void)
{
        addr_limit_user_check();
}