linux/arch/arm/kernel/signal.c
/*
 *  linux/arch/arm/kernel/signal.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/errno.h>
#include <linux/random.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/uaccess.h>
#include <linux/tracehook.h>

#include <asm/elf.h>
#include <asm/cacheflush.h>
#include <asm/traps.h>
#include <asm/ucontext.h>
#include <asm/unistd.h>
#include <asm/vfp.h>

/*
 * For ARM syscalls, we encode the syscall number into the instruction.
 */
#define SWI_SYS_SIGRETURN       (0xef000000|(__NR_sigreturn)|(__NR_OABI_SYSCALL_BASE))
#define SWI_SYS_RT_SIGRETURN    (0xef000000|(__NR_rt_sigreturn)|(__NR_OABI_SYSCALL_BASE))

/*
 * With EABI, the syscall number has to be loaded into r7.
 */
#define MOV_R7_NR_SIGRETURN     (0xe3a07000 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define MOV_R7_NR_RT_SIGRETURN  (0xe3a07000 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

/*
 * For Thumb syscalls, we pass the syscall number via r7.  We therefore
 * need two 16-bit instructions.
 */
#define SWI_THUMB_SIGRETURN     (0xdf00 << 16 | 0x2700 | (__NR_sigreturn - __NR_SYSCALL_BASE))
#define SWI_THUMB_RT_SIGRETURN  (0xdf00 << 16 | 0x2700 | (__NR_rt_sigreturn - __NR_SYSCALL_BASE))

static const unsigned long sigreturn_codes[7] = {
        MOV_R7_NR_SIGRETURN,    SWI_SYS_SIGRETURN,    SWI_THUMB_SIGRETURN,
        MOV_R7_NR_RT_SIGRETURN, SWI_SYS_RT_SIGRETURN, SWI_THUMB_RT_SIGRETURN,
};

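/*
 * Offset of the sigreturn trampoline within the signal return page
 * allocated by get_signal_page() below; setup_return() adds it to
 * mm->context.sigpage when pointing the handler's return address at
 * the trampoline.
 */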
static unsigned long signal_return_offset;

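/*
 * Coprocessor context save/restore helpers.  Each one stages the state
 * through a 64-bit aligned kernel buffer and copies a magic/size header
 * plus the storage area to or from the aux area of the user signal frame.
 */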
#ifdef CONFIG_CRUNCH
static int preserve_crunch_context(struct crunch_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct crunch_sigframe *kframe;

        /* the crunch context must be 64 bit aligned */
        kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        kframe->magic = CRUNCH_MAGIC;
        kframe->size = CRUNCH_STORAGE_SIZE;
        crunch_task_copy(current_thread_info(), &kframe->storage);
        return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_crunch_context(struct crunch_sigframe __user *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct crunch_sigframe *kframe;

        /* the crunch context must be 64 bit aligned */
        kframe = (struct crunch_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;
        if (kframe->magic != CRUNCH_MAGIC ||
            kframe->size != CRUNCH_STORAGE_SIZE)
                return -1;
        crunch_task_restore(current_thread_info(), &kframe->storage);
        return 0;
}
#endif

#ifdef CONFIG_IWMMXT

static int preserve_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        kframe->magic = IWMMXT_MAGIC;
        kframe->size = IWMMXT_STORAGE_SIZE;
        iwmmxt_task_copy(current_thread_info(), &kframe->storage);
        return __copy_to_user(frame, kframe, sizeof(*frame));
}

static int restore_iwmmxt_context(struct iwmmxt_sigframe *frame)
{
        char kbuf[sizeof(*frame) + 8];
        struct iwmmxt_sigframe *kframe;

        /* the iWMMXt context must be 64 bit aligned */
        kframe = (struct iwmmxt_sigframe *)((unsigned long)(kbuf + 8) & ~7);
        if (__copy_from_user(kframe, frame, sizeof(*frame)))
                return -1;
        if (kframe->magic != IWMMXT_MAGIC ||
            kframe->size != IWMMXT_STORAGE_SIZE)
                return -1;
        iwmmxt_task_restore(current_thread_info(), &kframe->storage);
        return 0;
}

#endif

#ifdef CONFIG_VFP

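/*
 * Write the VFP magic/size header into the user signal frame and let
 * the VFP code save (and clear) the current hardware state.
 */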
static int preserve_vfp_context(struct vfp_sigframe __user *frame)
{
        const unsigned long magic = VFP_MAGIC;
        const unsigned long size = VFP_STORAGE_SIZE;
        int err = 0;

        __put_user_error(magic, &frame->magic, err);
        __put_user_error(size, &frame->size, err);

        if (err)
                return -EFAULT;

        return vfp_preserve_user_clear_hwstate(&frame->ufp, &frame->ufp_exc);
}

static int restore_vfp_context(struct vfp_sigframe __user *frame)
{
        unsigned long magic;
        unsigned long size;
        int err = 0;

        __get_user_error(magic, &frame->magic, err);
        __get_user_error(size, &frame->size, err);

        if (err)
                return -EFAULT;
        if (magic != VFP_MAGIC || size != VFP_STORAGE_SIZE)
                return -EINVAL;

        return vfp_restore_user_hwstate(&frame->ufp, &frame->ufp_exc);
}

#endif

/*
 * Do a signal return; undo the signal stack.  These are aligned to 64-bit.
 */
struct sigframe {
        struct ucontext uc;
        unsigned long retcode[2];
};

struct rt_sigframe {
        struct siginfo info;
        struct sigframe sig;
};

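/*
 * Restore the blocked signal mask, the integer register state and any
 * coprocessor contexts from the user signal frame, rejecting register
 * values that are not valid for user mode.
 */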
static int restore_sigframe(struct pt_regs *regs, struct sigframe __user *sf)
{
        struct aux_sigframe __user *aux;
        sigset_t set;
        int err;

        err = __copy_from_user(&set, &sf->uc.uc_sigmask, sizeof(set));
        if (err == 0)
                set_current_blocked(&set);

        __get_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
        __get_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
        __get_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
        __get_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
        __get_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
        __get_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
        __get_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
        __get_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
        __get_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
        __get_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
        __get_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
        __get_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
        __get_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
        __get_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __get_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __get_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
        __get_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

        err |= !valid_user_regs(regs);

        aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
        if (err == 0)
                err |= restore_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
        if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
                err |= restore_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= restore_vfp_context(&aux->vfp);
#endif

        return err;
}

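/*
 * Return from a non-RT signal handler: unwind the sigframe that
 * setup_frame() placed on the user stack.
 */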
asmlinkage int sys_sigreturn(struct pt_regs *regs)
{
        struct sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct sigframe __user *)regs->ARM_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, frame))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

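/*
 * Return from an RT signal handler: as above, but also restore the
 * alternate signal stack settings saved in the ucontext.
 */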
asmlinkage int sys_rt_sigreturn(struct pt_regs *regs)
{
        struct rt_sigframe __user *frame;

        /* Always make any pending restarted system calls return -EINTR */
        current_thread_info()->restart_block.fn = do_no_restart_syscall;

        /*
         * Since we stacked the signal on a 64-bit boundary,
         * then 'sp' should be word aligned here.  If it's
         * not, then the user is trying to mess with us.
         */
        if (regs->ARM_sp & 7)
                goto badframe;

        frame = (struct rt_sigframe __user *)regs->ARM_sp;

        if (!access_ok(VERIFY_READ, frame, sizeof (*frame)))
                goto badframe;

        if (restore_sigframe(regs, &frame->sig))
                goto badframe;

        if (restore_altstack(&frame->sig.uc.uc_stack))
                goto badframe;

        return regs->ARM_r0;

badframe:
        force_sig(SIGSEGV, current);
        return 0;
}

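/*
 * Save the integer register state, fault information, signal mask and
 * any coprocessor contexts into the user signal frame.
 */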
static int
setup_sigframe(struct sigframe __user *sf, struct pt_regs *regs, sigset_t *set)
{
        struct aux_sigframe __user *aux;
        int err = 0;

        __put_user_error(regs->ARM_r0, &sf->uc.uc_mcontext.arm_r0, err);
        __put_user_error(regs->ARM_r1, &sf->uc.uc_mcontext.arm_r1, err);
        __put_user_error(regs->ARM_r2, &sf->uc.uc_mcontext.arm_r2, err);
        __put_user_error(regs->ARM_r3, &sf->uc.uc_mcontext.arm_r3, err);
        __put_user_error(regs->ARM_r4, &sf->uc.uc_mcontext.arm_r4, err);
        __put_user_error(regs->ARM_r5, &sf->uc.uc_mcontext.arm_r5, err);
        __put_user_error(regs->ARM_r6, &sf->uc.uc_mcontext.arm_r6, err);
        __put_user_error(regs->ARM_r7, &sf->uc.uc_mcontext.arm_r7, err);
        __put_user_error(regs->ARM_r8, &sf->uc.uc_mcontext.arm_r8, err);
        __put_user_error(regs->ARM_r9, &sf->uc.uc_mcontext.arm_r9, err);
        __put_user_error(regs->ARM_r10, &sf->uc.uc_mcontext.arm_r10, err);
        __put_user_error(regs->ARM_fp, &sf->uc.uc_mcontext.arm_fp, err);
        __put_user_error(regs->ARM_ip, &sf->uc.uc_mcontext.arm_ip, err);
        __put_user_error(regs->ARM_sp, &sf->uc.uc_mcontext.arm_sp, err);
        __put_user_error(regs->ARM_lr, &sf->uc.uc_mcontext.arm_lr, err);
        __put_user_error(regs->ARM_pc, &sf->uc.uc_mcontext.arm_pc, err);
        __put_user_error(regs->ARM_cpsr, &sf->uc.uc_mcontext.arm_cpsr, err);

        __put_user_error(current->thread.trap_no, &sf->uc.uc_mcontext.trap_no, err);
        __put_user_error(current->thread.error_code, &sf->uc.uc_mcontext.error_code, err);
        __put_user_error(current->thread.address, &sf->uc.uc_mcontext.fault_address, err);
        __put_user_error(set->sig[0], &sf->uc.uc_mcontext.oldmask, err);

        err |= __copy_to_user(&sf->uc.uc_sigmask, set, sizeof(*set));

        aux = (struct aux_sigframe __user *) sf->uc.uc_regspace;
#ifdef CONFIG_CRUNCH
        if (err == 0)
                err |= preserve_crunch_context(&aux->crunch);
#endif
#ifdef CONFIG_IWMMXT
        if (err == 0 && test_thread_flag(TIF_USING_IWMMXT))
                err |= preserve_iwmmxt_context(&aux->iwmmxt);
#endif
#ifdef CONFIG_VFP
        if (err == 0)
                err |= preserve_vfp_context(&aux->vfp);
#endif
        __put_user_error(0, &aux->end_magic, err);

        return err;
}

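/*
 * Work out where on the user stack the signal frame should go,
 * honouring SA_ONSTACK via sigsp() and checking that the frame is
 * writable.
 */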
static inline void __user *
get_sigframe(struct ksignal *ksig, struct pt_regs *regs, int framesize)
{
        unsigned long sp = sigsp(regs->ARM_sp, ksig);
        void __user *frame;

        /*
         * ATPCS B01 mandates 8-byte alignment
         */
        frame = (void __user *)((sp - framesize) & ~7);

        /*
         * Check that we can actually write to the signal frame.
         */
        if (!access_ok(VERIFY_WRITE, frame, framesize))
                frame = NULL;

        return frame;
}

/*
 * translate the signal
 */
static inline int map_sig(int sig)
{
        struct thread_info *thread = current_thread_info();
        if (sig < 32 && thread->exec_domain && thread->exec_domain->signal_invmap)
                sig = thread->exec_domain->signal_invmap[sig];
        return sig;
}

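/*
 * Arrange for the handler to be entered on return to user mode: pick
 * ARM or Thumb state from the handler address, choose the sigreturn
 * trampoline (sa_restorer, the signal page, or the copy written to the
 * stack) and load the registers accordingly.
 */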
static int
setup_return(struct pt_regs *regs, struct ksignal *ksig,
             unsigned long __user *rc, void __user *frame)
{
        unsigned long handler = (unsigned long)ksig->ka.sa.sa_handler;
        unsigned long retcode;
        int thumb = 0;
        unsigned long cpsr = regs->ARM_cpsr & ~(PSR_f | PSR_E_BIT);

        cpsr |= PSR_ENDSTATE;

        /*
         * Maybe we need to deliver a 32-bit signal to a 26-bit task.
         */
        if (ksig->ka.sa.sa_flags & SA_THIRTYTWO)
                cpsr = (cpsr & ~MODE_MASK) | USR_MODE;

#ifdef CONFIG_ARM_THUMB
        if (elf_hwcap & HWCAP_THUMB) {
                /*
                 * The LSB of the handler determines if we're going to
                 * be using THUMB or ARM mode for this signal handler.
                 */
                thumb = handler & 1;

                if (thumb) {
                        cpsr |= PSR_T_BIT;
#if __LINUX_ARM_ARCH__ >= 7
                        /* clear the If-Then Thumb-2 execution state */
                        cpsr &= ~PSR_IT_MASK;
#endif
                } else
                        cpsr &= ~PSR_T_BIT;
        }
#endif

        if (ksig->ka.sa.sa_flags & SA_RESTORER) {
                retcode = (unsigned long)ksig->ka.sa.sa_restorer;
        } else {
                unsigned int idx = thumb << 1;

                if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                        idx += 3;

                /*
                 * Put the sigreturn code on the stack no matter which return
                 * mechanism we use in order to remain ABI compliant
                 */
                if (__put_user(sigreturn_codes[idx],   rc) ||
                    __put_user(sigreturn_codes[idx+1], rc+1))
                        return 1;

#ifdef CONFIG_MMU
                if (cpsr & MODE32_BIT) {
                        struct mm_struct *mm = current->mm;

                        /*
                         * 32-bit code can use the signal return page
                         * except when the MPU has protected the vectors
                         * page from PL0
                         */
                        retcode = mm->context.sigpage + signal_return_offset +
                                  (idx << 2) + thumb;
                } else
#endif
                {
                        /*
                         * Ensure that the instruction cache sees
                         * the return code written onto the stack.
                         */
                        flush_icache_range((unsigned long)rc,
                                           (unsigned long)(rc + 2));

                        retcode = ((unsigned long)rc) + thumb;
                }
        }

        regs->ARM_r0 = map_sig(ksig->sig);
        regs->ARM_sp = (unsigned long)frame;
        regs->ARM_lr = retcode;
        regs->ARM_pc = handler;
        regs->ARM_cpsr = cpsr;

        return 0;
}

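/*
 * Build the stack frame for a handler registered without SA_SIGINFO.
 */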
static int
setup_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        /*
         * Set uc.uc_flags to a value which sc.trap_no would never have.
         */
        __put_user_error(0x5ac3c35a, &frame->uc.uc_flags, err);

        err |= setup_sigframe(frame, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->retcode, frame);

        return err;
}

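/*
 * Build the stack frame for an SA_SIGINFO handler, including the
 * siginfo and full ucontext the handler receives in r1 and r2.
 */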
static int
setup_rt_frame(struct ksignal *ksig, sigset_t *set, struct pt_regs *regs)
{
        struct rt_sigframe __user *frame = get_sigframe(ksig, regs, sizeof(*frame));
        int err = 0;

        if (!frame)
                return 1;

        err |= copy_siginfo_to_user(&frame->info, &ksig->info);

        __put_user_error(0, &frame->sig.uc.uc_flags, err);
        __put_user_error(NULL, &frame->sig.uc.uc_link, err);

        err |= __save_altstack(&frame->sig.uc.uc_stack, regs->ARM_sp);
        err |= setup_sigframe(&frame->sig, regs, set);
        if (err == 0)
                err = setup_return(regs, ksig, frame->sig.retcode, frame);

        if (err == 0) {
                /*
                 * For realtime signals we must also set the second and third
                 * arguments for the signal handler.
                 *   -- Peter Maydell <pmaydell@chiark.greenend.org.uk> 2000-12-06
                 */
                regs->ARM_r1 = (unsigned long)&frame->info;
                regs->ARM_r2 = (unsigned long)&frame->sig.uc;
        }

        return err;
}

/*
 * OK, we're invoking a handler
 */
static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
{
        sigset_t *oldset = sigmask_to_save();
        int ret;

        /*
         * Set up the stack frame
         */
        if (ksig->ka.sa.sa_flags & SA_SIGINFO)
                ret = setup_rt_frame(ksig, oldset, regs);
        else
                ret = setup_frame(ksig, oldset, regs);

        /*
         * Check that the resulting registers are actually sane.
         */
        ret |= !valid_user_regs(regs);

        signal_setup_done(ret, ksig, 0);
}

/*
 * Note that 'init' is a special process: it doesn't get signals it doesn't
 * want to handle. Thus you cannot kill init even with a SIGKILL even by
 * mistake.
 *
 * Note that we go through the signals twice: once to check the signals that
 * the kernel can handle, and then we build all the user-level signal handling
 * stack-frames in one go after that.
 */
static int do_signal(struct pt_regs *regs, int syscall)
{
        unsigned int retval = 0, continue_addr = 0, restart_addr = 0;
        struct ksignal ksig;
        int restart = 0;

        /*
         * If we were from a system call, check for system call restarting...
         */
        if (syscall) {
                continue_addr = regs->ARM_pc;
                restart_addr = continue_addr - (thumb_mode(regs) ? 2 : 4);
                retval = regs->ARM_r0;

                /*
                 * Prepare for system call restart.  We do this here so that a
                 * debugger will see the already changed PSW.
                 */
                switch (retval) {
                case -ERESTART_RESTARTBLOCK:
                        restart -= 2;
                case -ERESTARTNOHAND:
                case -ERESTARTSYS:
                case -ERESTARTNOINTR:
                        restart++;
                        regs->ARM_r0 = regs->ARM_ORIG_r0;
                        regs->ARM_pc = restart_addr;
                        break;
                }
        }

        /*
         * Get the signal to deliver.  When running under ptrace, at this
         * point the debugger may change all our registers ...
         */
        /*
         * Depending on the signal settings we may need to revert the
         * decision to restart the system call.  But skip this if a
         * debugger has chosen to restart at a different PC.
         */
        if (get_signal(&ksig)) {
                /* handler */
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        if (retval == -ERESTARTNOHAND ||
                            retval == -ERESTART_RESTARTBLOCK
                            || (retval == -ERESTARTSYS
                                && !(ksig.ka.sa.sa_flags & SA_RESTART))) {
                                regs->ARM_r0 = -EINTR;
                                regs->ARM_pc = continue_addr;
                        }
                }
                handle_signal(&ksig, regs);
        } else {
                /* no handler */
                restore_saved_sigmask();
                if (unlikely(restart) && regs->ARM_pc == restart_addr) {
                        regs->ARM_pc = continue_addr;
                        return restart;
                }
        }
        return 0;
}

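/*
 * Called on the return-to-user path while TIF work flags are set: loop
 * handling rescheduling, signal delivery and notify-resume work.  A
 * non-zero return value asks the caller to restart the interrupted
 * system call without going back out to user space.
 */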
asmlinkage int
do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
{
        do {
                if (likely(thread_flags & _TIF_NEED_RESCHED)) {
                        schedule();
                } else {
                        if (unlikely(!user_mode(regs)))
                                return 0;
                        local_irq_enable();
                        if (thread_flags & _TIF_SIGPENDING) {
                                int restart = do_signal(regs, syscall);
                                if (unlikely(restart)) {
                                        /*
                                         * Restart without handlers.
                                         * Deal with it without leaving
                                         * the kernel space.
                                         */
                                        return restart;
                                }
                                syscall = 0;
                        } else {
                                clear_thread_flag(TIF_NOTIFY_RESUME);
                                tracehook_notify_resume(regs);
                        }
                }
                local_irq_disable();
                thread_flags = current_thread_info()->flags;
        } while (thread_flags & _TIF_WORK_MASK);
        return 0;
}

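/*
 * Allocate the page used as the sigreturn trampoline and copy
 * sigreturn_codes into it at a randomized offset, recording that
 * offset in signal_return_offset for setup_return().
 */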
struct page *get_signal_page(void)
{
        unsigned long ptr;
        unsigned offset;
        struct page *page;
        void *addr;

        page = alloc_pages(GFP_KERNEL, 0);

        if (!page)
                return NULL;

        addr = page_address(page);

        /* Give the signal return code some randomness */
        offset = 0x200 + (get_random_int() & 0x7fc);
        signal_return_offset = offset;

        /*
         * Copy the signal return handlers into the signal page, and
         * set sigreturn to be a pointer to these.
         */
        memcpy(addr + offset, sigreturn_codes, sizeof(sigreturn_codes));

        ptr = (unsigned long)addr + offset;
        flush_icache_range(ptr, ptr + sizeof(sigreturn_codes));

        return page;
}
