linux/arch/arm/kernel/traps.c
/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>

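/*
 * Names for the exception sources reported by bad_mode(); the 'reason'
 * argument handed over by the low-level vector stubs indexes this array.
 */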
static const char *handler[] = {
        "prefetch abort",
        "data abort",
        "address exception",
        "interrupt",
        "undefined instruction",
};

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
        get_option(&str, &user_debug);
        return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
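
/*
 * Illustrative usage (a sketch, not taken from this file): user_debug is a
 * bitmask parsed from the kernel command line, so booting with e.g.
 * "user_debug=255" sets all of the UDBG_* bits tested below
 * (UDBG_UNDEFINED, UDBG_SYSCALL, UDBG_BADABORT) and enables the extra
 * user-space fault diagnostics in this file.
 */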

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
        printk("[<%08lx>] (%pS) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

        if (in_exception_text(where))
                dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
        if (sp < PAGE_OFFSET ||
            (sp > (unsigned long)high_memory && high_memory != NULL))
                return -EFAULT;

        return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
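/*
 * Output sketch (illustrative values): each line covers 32 bytes as eight
 * 32-bit words, keyed by the low 16 bits of the address; words that cannot
 * be read are shown as "????????", e.g.
 *
 *   1f00: c0123456 c0654321 00000000 ???????? ...
 */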
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
                     unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

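/*
 * Print the instruction stream around the faulting PC.  Output sketch
 * (illustrative values): "Code: e92d4800 e28db004 e24dd008 e59f3010 (e5933000)",
 * with the faulting word parenthesised; in Thumb mode 16-bit halfwords are
 * printed instead.
 */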
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        for (i = -4; i < 1 + !!thumb; i++) {
                unsigned int val, bad;

                if (thumb)
                        bad = __get_user(val, &((u16 *)addr)[i]);
                else
                        bad = __get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
                                        width, val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);

        set_fs(fs);
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unwind_backtrace(regs, tsk);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unsigned int fp, mode;
        int ok = 1;

        printk("Backtrace: ");

        if (!tsk)
                tsk = current;

        if (regs) {
                fp = regs->ARM_fp;
                mode = processor_mode(regs);
        } else if (tsk != current) {
                fp = thread_saved_fp(tsk);
                mode = 0x10;
        } else {
                asm("mov %0, fp" : "=r" (fp) : : "cc");
                mode = 0x10;
        }

        if (!fp) {
                printk("no frame pointer");
                ok = 0;
        } else if (verify_stack(fp)) {
                printk("invalid frame pointer 0x%08x", fp);
                ok = 0;
        } else if (fp < (unsigned long)end_of_stack(tsk))
                printk("frame pointer underflow");
        printk("\n");

        if (ok)
                c_backtrace(fp, mode);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif

static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        printk(KERN_EMERG "Internal error: %s: %x [#%d]" S_PREEMPT S_SMP
               S_ISA "\n", str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return 1;

        print_modules();
        __show_regs(regs);
        printk(KERN_EMERG "Process %.*s (pid: %d, stack limit = 0x%p)\n",
                TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
                dump_instr(KERN_EMERG, regs);
        }

        return 0;
}

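/*
 * Serialisation for die(): the raw lock is taken with a trylock so that a
 * CPU which oopses again while already reporting (tracked via die_owner
 * and die_nest_count) can nest the report instead of deadlocking on its
 * own lock.
 */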
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

static unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (signr)
                do_exit(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        if (!user_mode(regs))
                bug_type = report_bug(regs->ARM_pc, regs);
        if (bug_type != BUG_TRAP_TYPE_NONE)
                str = "Oops - BUG";

        if (__die(str, err, regs))
                sig = 0;

        oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
                struct siginfo *info, unsigned long err, unsigned long trap)
{
        if (user_mode(regs)) {
                current->thread.error_code = err;
                current->thread.trap_no = trap;

                force_sig_info(info->si_signo, info, current);
        } else {
                die(str, regs, err);
        }
}

#ifdef CONFIG_GENERIC_BUG

int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
        u16 bkpt;
        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
        u32 bkpt;
        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

        if (probe_kernel_address((unsigned *)pc, bkpt))
                return 0;

        return bkpt == insn;
}

#endif

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

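/*
 * Hook API for instruction emulation: a hook matches when both the faulting
 * instruction and the CPSR match under the given masks, and its ->fn()
 * returns 0 to signal that the instruction has been handled.  See the
 * arm_mrc_hook TLS emulation hook further down for an in-file example.
 */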
void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
        struct undef_hook *hook;
        unsigned long flags;
        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
                        fn = hook->fn;
        raw_spin_unlock_irqrestore(&undef_lock, flags);

        return fn ? fn(regs, instr) : 1;
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        unsigned int instr;
        siginfo_t info;
        void __user *pc;

        pc = (void __user *)instruction_pointer(regs);

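        /*
         * Fetch the faulting instruction: in SVC mode it can be read
         * directly, from user space it must go through get_user().  A
         * 32-bit Thumb-2 encoding is read as two halfwords and glued
         * together with __opcode_thumb32_compose().
         */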
        if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
                if (thumb_mode(regs)) {
                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
                        if (is_wide_instruction(instr)) {
                                u16 inst2;
                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
                                instr = __opcode_thumb32_compose(instr, inst2);
                        }
                } else
#endif
                        instr = __mem_to_opcode_arm(*(u32 *) pc);
        } else if (thumb_mode(regs)) {
                if (get_user(instr, (u16 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_thumb16(instr);
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
                        if (get_user(instr2, (u16 __user *)pc+1))
                                goto die_sig;
                        instr2 = __mem_to_opcode_thumb16(instr2);
                        instr = __opcode_thumb32_compose(instr, instr2);
                }
        } else {
                if (get_user(instr, (u32 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_arm(instr);
        }

        if (call_undef_hook(regs, instr) == 0)
                return;

die_sig:
#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                printk(KERN_INFO "%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                dump_instr(KERN_INFO, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;

        arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}

asmlinkage void do_unexp_fiq (struct pt_regs *regs)
{
        printk("Hmm.  Unexpected FIQ received, but trying to continue\n");
        printk("You may have a hardware problem...\n");
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
        console_verbose();

        printk(KERN_CRIT "Bad mode in %s handler detected\n", handler[reason]);

        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
        struct thread_info *thread = current_thread_info();
        siginfo_t info;

        if ((current->personality & PER_MASK) != PER_LINUX &&
            thread->exec_domain->handler) {
                thread->exec_domain->handler(n, regs);
                return regs->ARM_r0;
        }

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SYSCALL) {
                printk(KERN_ERR "[%d] %s: obsolete system call %08x.\n",
                        task_pid_nr(current), current->comm, n);
                dump_instr(KERN_ERR, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

        return regs->ARM_r0;
}

static long do_cache_op_restart(struct restart_block *);

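/*
 * Flush the requested range in page-sized chunks so that a pending signal
 * can interrupt a large flush: the remaining range is parked in the
 * arm_restart_block and the syscall is restarted where it left off via
 * do_cache_op_restart().
 */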
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
        int ret;

        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);

                if (signal_pending(current)) {
                        struct thread_info *ti = current_thread_info();

                        ti->restart_block = (struct restart_block) {
                                .fn     = do_cache_op_restart,
                        };

                        ti->arm_restart_block = (struct arm_restart_block) {
                                {
                                        .cache = {
                                                .start  = start,
                                                .end    = end,
                                        },
                                },
                        };

                        return -ERESTART_RESTARTBLOCK;
                }

                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
                        return ret;

                cond_resched();
                start += chunk;
        } while (start < end);

        return 0;
}

static long do_cache_op_restart(struct restart_block *unused)
{
        struct arm_restart_block *restart_block;

        restart_block = &current_thread_info()->arm_restart_block;
        return __do_cache_op(restart_block->cache.start,
                             restart_block->cache.end);
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
        if (end < start || flags)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, start, end - start))
                return -EFAULT;

        return __do_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
        struct thread_info *thread = current_thread_info();
        siginfo_t info;

        if ((no >> 16) != (__ARM_NR_BASE >> 16))
                return bad_syscall(no, regs);

        switch (no & 0xffff) {
        case 0: /* branch through 0 */
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code  = SEGV_MAPERR;
                info.si_addr  = NULL;

                arm_notify_die("branch through zero", regs, &info, 0, 0);
                return 0;

        case NR(breakpoint): /* SWI BREAK_POINT */
                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
                ptrace_break(current, regs);
                return regs->ARM_r0;

        /*
         * Flush a region from virtual address 'r0' to virtual address 'r1'
         * _exclusive_.  There is no alignment requirement on either address;
         * user space does not need to know the hardware cache layout.
         *
         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
         * is defined to be something else.  For now we ignore it, but may
         * the fires of hell burn in your belly if you break this rule. ;)
         *
         * (at a later date, we may want to allow this call to not flush
         * various aspects of the cache.  Passing '0' will guarantee that
         * everything necessary gets flushed to maintain consistency in
         * the specified region).
         */
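        /*
         * User-space invocation sketch (illustrative, not part of this
         * file): EABI callers put __ARM_NR_cacheflush in r7, the range in
         * r0/r1 and zero in r2, then issue "svc 0", roughly:
         *
         *      mov     r0, <start>
         *      mov     r1, <end>
         *      mov     r2, #0
         *      ldr     r7, =__ARM_NR_cacheflush
         *      svc     #0
         */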
        case NR(cacheflush):
                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr &= ~MODE32_BIT;
                return regs->ARM_r0;

        case NR(usr32):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr |= MODE32_BIT;
                return regs->ARM_r0;

        case NR(set_tls):
                thread->tp_value[0] = regs->ARM_r0;
                if (tls_emu)
                        return 0;
                if (has_tls_reg) {
                        asm ("mcr p15, 0, %0, c13, c0, 3"
                                : : "r" (regs->ARM_r0));
                } else {
                        /*
                         * User space must never try to access this directly.
                         * Expect your app to break eventually if you do so.
                         * The user helper at 0xffff0fe0 must be used instead.
                         * (see entry-armv.S for details)
                         */
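                        /*
                         * Read-side counterpart (user-space sketch, an
                         * assumption based on the documented kuser helper
                         * ABI): calling the helper, e.g. "ldr r0, =0xffff0fe0;
                         * blx r0", returns the value stored here in r0.
                         */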
                        *((unsigned int *)0xffff0ff0) = regs->ARM_r0;
                }
                return 0;

#ifdef CONFIG_NEEDS_SYSCALL_FOR_CMPXCHG
        /*
         * Atomically store r1 in *r2 if *r2 is equal to r0 for user space.
         * Return zero in r0 if *MEM was changed or non-zero if no exchange
         * happened.  Also set the user C flag accordingly.
         * If access permissions have to be fixed up then non-zero is
         * returned and the operation has to be re-attempted.
         *
         * *NOTE*: This is a ghost syscall private to the kernel.  Only the
         * __kuser_cmpxchg code in entry-armv.S should be aware of its
         * existence.  Don't ever use this from user code.
         */
        case NR(cmpxchg):
        for (;;) {
                extern void do_DataAbort(unsigned long addr, unsigned int fsr,
                                         struct pt_regs *regs);
                unsigned long val;
                unsigned long addr = regs->ARM_r2;
                struct mm_struct *mm = current->mm;
                pgd_t *pgd; pmd_t *pmd; pte_t *pte;
                spinlock_t *ptl;

                regs->ARM_cpsr &= ~PSR_C_BIT;
                down_read(&mm->mmap_sem);
                pgd = pgd_offset(mm, addr);
                if (!pgd_present(*pgd))
                        goto bad_access;
                pmd = pmd_offset(pgd, addr);
                if (!pmd_present(*pmd))
                        goto bad_access;
                pte = pte_offset_map_lock(mm, pmd, addr, &ptl);
                if (!pte_present(*pte) || !pte_write(*pte) || !pte_dirty(*pte)) {
                        pte_unmap_unlock(pte, ptl);
                        goto bad_access;
                }
                val = *(unsigned long *)addr;
                val -= regs->ARM_r0;
                if (val == 0) {
                        *(unsigned long *)addr = regs->ARM_r1;
                        regs->ARM_cpsr |= PSR_C_BIT;
                }
                pte_unmap_unlock(pte, ptl);
                up_read(&mm->mmap_sem);
                return val;

                bad_access:
                up_read(&mm->mmap_sem);
                /* simulate a write access fault */
                do_DataAbort(addr, 15 + (1 << 11), regs);
        }
#endif

        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
                   way the calling program can gracefully determine whether
                   a feature is supported.  */
                if ((no & 0xffff) <= 0x7ff)
                        return -ENOSYS;
                break;
        }
#ifdef CONFIG_DEBUG_USER
        /*
         * experience shows that these seem to indicate that
         * something catastrophic has happened
         */
        if (user_debug & UDBG_SYSCALL) {
                printk("[%d] %s: arm syscall %d\n",
                       task_pid_nr(current), current->comm, no);
                dump_instr("", regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(regs->ARM_fp, processor_mode(regs));
                }
        }
#endif
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
        return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value[0];
        regs->ARM_pc += 4;
        return 0;
}

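/*
 * The hook below matches "mrc p15, 0, <Rd>, c13, c0, 3" (a read of the
 * user read-only TLS register) in ARM state under any condition code;
 * get_tp_trap() recovers <Rd> from bits [15:12] and supplies the saved
 * tp_value instead.
 */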
static struct undef_hook arm_mrc_hook = {
        .instr_mask     = 0x0fff0fff,
        .instr_val      = 0x0e1d0f70,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = 0,
        .fn             = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
        register_undef_hook(&arm_mrc_hook);
        return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

void __bad_xchg(volatile void *ptr, int size)
{
        printk("xchg: bad data size: pc 0x%p, ptr 0x%p, size %d\n",
                __builtin_return_address(0), ptr, size);
        BUG();
}
EXPORT_SYMBOL(__bad_xchg);

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        siginfo_t info;

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_BADABORT) {
                printk(KERN_ERR "[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                        task_pid_nr(current), current->comm, code, instr);
                dump_instr(KERN_ERR, regs);
                show_pte(current->mm, addr);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = (void __user *)addr;

        arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}

void __readwrite_bug(const char *fn)
{
        printk("%s called, but not implemented\n", fn);
        BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
        printk("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
        printk("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
        printk("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
        printk("Division by zero in kernel.\n");
        dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

void __init trap_init(void)
{
        return;
}

#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;

        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

        /*
         * vectors + 0xfe0 = __kuser_get_tls
         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
         */
        if (tls_emu || has_tls_reg)
                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

void __init early_trap_init(void *vectors_base)
{
#ifndef CONFIG_CPU_V7M
        unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;

        vectors_page = vectors_base;

        /*
         * Poison the vectors page with an undefined instruction.  This
         * instruction is chosen to be undefined for both ARM and Thumb
         * ISAs.  The Thumb version is an undefined instruction with a
         * branch back to the undefined instruction.
         */
        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                ((u32 *)vectors_base)[i] = 0xe7fddef1;

        /*
         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
        memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);

        kuser_init(vectors_base);

        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
        modify_domain(DOMAIN_USER, DOMAIN_CLIENT);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
         * memory area. The address is configurable and so a table in the kernel
         * image can be used.
         */
#endif
}