linux/arch/arm/kernel/traps.c
   1/*
   2 *  linux/arch/arm/kernel/traps.c
   3 *
   4 *  Copyright (C) 1995-2009 Russell King
   5 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
   6 *
   7 * This program is free software; you can redistribute it and/or modify
   8 * it under the terms of the GNU General Public License version 2 as
   9 * published by the Free Software Foundation.
  10 *
  11 *  'traps.c' handles hardware exceptions after we have saved some state in
  12 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
  13 *  kill the offending process.
  14 */
  15#include <linux/signal.h>
  16#include <linux/personality.h>
  17#include <linux/kallsyms.h>
  18#include <linux/spinlock.h>
  19#include <linux/uaccess.h>
  20#include <linux/hardirq.h>
  21#include <linux/kdebug.h>
  22#include <linux/kprobes.h>
  23#include <linux/module.h>
  24#include <linux/kexec.h>
  25#include <linux/bug.h>
  26#include <linux/delay.h>
  27#include <linux/init.h>
  28#include <linux/sched/signal.h>
  29#include <linux/sched/debug.h>
  30#include <linux/sched/task_stack.h>
  31#include <linux/irq.h>
  32
  33#include <linux/atomic.h>
  34#include <asm/cacheflush.h>
  35#include <asm/exception.h>
  36#include <asm/unistd.h>
  37#include <asm/traps.h>
  38#include <asm/ptrace.h>
  39#include <asm/unwind.h>
  40#include <asm/tls.h>
  41#include <asm/system_misc.h>
  42#include <asm/opcodes.h>
  43
  44
  45static const char *handler[]= {
  46        "prefetch abort",
  47        "data abort",
  48        "address exception",
  49        "interrupt",
  50        "undefined instruction",
  51};
  52
  53void *vectors_page;
  54
  55#ifdef CONFIG_DEBUG_USER
  56unsigned int user_debug;
  57
  58static int __init user_debug_setup(char *str)
  59{
  60        get_option(&str, &user_debug);
  61        return 1;
  62}
  63__setup("user_debug=", user_debug_setup);
  64#endif
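
/*
 * Example (sketch): user_debug is filled in from the kernel command line by
 * the __setup() handler above, so booting with, say,
 *
 *	user_debug=0xff
 *
 * turns on every flag (assuming all of the UDBG_* bits sit in the low byte).
 * The individual bits are tested further down in this file as UDBG_UNDEFINED,
 * UDBG_SYSCALL and UDBG_BADABORT to enable extra diagnostics when user space
 * triggers the corresponding fault.
 */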
  65
  66static void dump_mem(const char *, const char *, unsigned long, unsigned long);
  67
  68void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
  69{
  70#ifdef CONFIG_KALLSYMS
  71        printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
  72#else
  73        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
  74#endif
  75
  76        if (in_entry_text(from))
  77                dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
  78}
  79
  80void dump_backtrace_stm(u32 *stack, u32 instruction)
  81{
  82        char str[80], *p;
  83        unsigned int x;
  84        int reg;
  85
  86        for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
  87                if (instruction & BIT(reg)) {
  88                        p += sprintf(p, " r%d:%08x", reg, *stack--);
  89                        if (++x == 6) {
  90                                x = 0;
  91                                p = str;
  92                                printk("%s\n", str);
  93                        }
  94                }
  95        }
  96        if (p != str)
  97                printk("%s\n", str);
  98}
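
/*
 * Worked example (register values purely illustrative): for a prologue that
 * saved "stmfd sp!, {r4 - r10, ...}", bits 4-10 of 'instruction' are set, so
 * this prints six "rN:value" pairs per line, highest register first, reading
 * successive words downwards from 'stack':
 *
 *	 r10:c0a01f00 r9:c0a01e80 r8:00000001 r7:00000000 r6:ef7c2000 r5:00000002
 *	 r4:c0907e58
 *
 * Only r0-r10 (bits 0-10 of the instruction) are examined here.
 */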
  99
 100#ifndef CONFIG_ARM_UNWIND
 101/*
  102 * Stack pointers should always be within the kernel's view of
 103 * physical memory.  If it is not there, then we can't dump
 104 * out any information relating to the stack.
 105 */
 106static int verify_stack(unsigned long sp)
 107{
 108        if (sp < PAGE_OFFSET ||
 109            (sp > (unsigned long)high_memory && high_memory != NULL))
 110                return -EFAULT;
 111
 112        return 0;
 113}
 114#endif
 115
 116/*
 117 * Dump out the contents of some memory nicely...
 118 */
 119static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
 120                     unsigned long top)
 121{
 122        unsigned long first;
 123        mm_segment_t fs;
 124        int i;
 125
 126        /*
 127         * We need to switch to kernel mode so that we can use __get_user
 128         * to safely read from kernel space.  Note that we now dump the
 129         * code first, just in case the backtrace kills us.
 130         */
 131        fs = get_fs();
 132        set_fs(KERNEL_DS);
 133
 134        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);
 135
 136        for (first = bottom & ~31; first < top; first += 32) {
 137                unsigned long p;
 138                char str[sizeof(" 12345678") * 8 + 1];
 139
 140                memset(str, ' ', sizeof(str));
 141                str[sizeof(str) - 1] = '\0';
 142
 143                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
 144                        if (p >= bottom && p < top) {
 145                                unsigned long val;
 146                                if (__get_user(val, (unsigned long *)p) == 0)
 147                                        sprintf(str + i * 9, " %08lx", val);
 148                                else
 149                                        sprintf(str + i * 9, " ????????");
 150                        }
 151                }
 152                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
 153        }
 154
 155        set_fs(fs);
 156}
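
/*
 * Example output (format only, values illustrative): the header gives the
 * dumped range and each line shows the low 16 bits of the row address
 * followed by up to eight words:
 *
 *	Stack: (0xc0907e40 to 0xc0908000)
 *	7e40: 00000000 c0907e58 c0a01f00 00000001 ef7c2000 c0907e88 c01234ab 600f0013
 *
 * Words outside [bottom, top) in a 32-byte row are left blank, and words
 * that fault when read are printed as "????????".
 */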
 157
 158static void __dump_instr(const char *lvl, struct pt_regs *regs)
 159{
 160        unsigned long addr = instruction_pointer(regs);
 161        const int thumb = thumb_mode(regs);
 162        const int width = thumb ? 4 : 8;
 163        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
 164        int i;
 165
 166        /*
 167         * Note that we now dump the code first, just in case the backtrace
 168         * kills us.
 169         */
 170
 171        for (i = -4; i < 1 + !!thumb; i++) {
 172                unsigned int val, bad;
 173
 174                if (thumb)
 175                        bad = get_user(val, &((u16 *)addr)[i]);
 176                else
 177                        bad = get_user(val, &((u32 *)addr)[i]);
 178
 179                if (!bad)
 180                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
 181                                        width, val);
 182                else {
 183                        p += sprintf(p, "bad PC value");
 184                        break;
 185                }
 186        }
 187        printk("%sCode: %s\n", lvl, str);
 188}
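
/*
 * Example output (format only, values illustrative): the faulting
 * instruction is shown in parentheses, preceded by the four words before it
 * (and, in Thumb mode, followed by one more halfword):
 *
 *	Code: e24cb004 e52de004 e59f0010 ebfffffe (e7f001f2)
 */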
 189
 190static void dump_instr(const char *lvl, struct pt_regs *regs)
 191{
 192        mm_segment_t fs;
 193
 194        if (!user_mode(regs)) {
 195                fs = get_fs();
 196                set_fs(KERNEL_DS);
 197                __dump_instr(lvl, regs);
 198                set_fs(fs);
 199        } else {
 200                __dump_instr(lvl, regs);
 201        }
 202}
 203
 204#ifdef CONFIG_ARM_UNWIND
 205static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 206{
 207        unwind_backtrace(regs, tsk);
 208}
 209#else
 210static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
 211{
 212        unsigned int fp, mode;
 213        int ok = 1;
 214
 215        printk("Backtrace: ");
 216
 217        if (!tsk)
 218                tsk = current;
 219
 220        if (regs) {
 221                fp = frame_pointer(regs);
 222                mode = processor_mode(regs);
 223        } else if (tsk != current) {
 224                fp = thread_saved_fp(tsk);
 225                mode = 0x10;
 226        } else {
 227                asm("mov %0, fp" : "=r" (fp) : : "cc");
 228                mode = 0x10;
 229        }
 230
 231        if (!fp) {
 232                pr_cont("no frame pointer");
 233                ok = 0;
 234        } else if (verify_stack(fp)) {
 235                pr_cont("invalid frame pointer 0x%08x", fp);
 236                ok = 0;
 237        } else if (fp < (unsigned long)end_of_stack(tsk))
 238                pr_cont("frame pointer underflow");
 239        pr_cont("\n");
 240
 241        if (ok)
 242                c_backtrace(fp, mode);
 243}
 244#endif
 245
 246void show_stack(struct task_struct *tsk, unsigned long *sp)
 247{
 248        dump_backtrace(NULL, tsk);
 249        barrier();
 250}
 251
 252#ifdef CONFIG_PREEMPT
 253#define S_PREEMPT " PREEMPT"
 254#else
 255#define S_PREEMPT ""
 256#endif
 257#ifdef CONFIG_SMP
 258#define S_SMP " SMP"
 259#else
 260#define S_SMP ""
 261#endif
 262#ifdef CONFIG_THUMB2_KERNEL
 263#define S_ISA " THUMB2"
 264#else
 265#define S_ISA " ARM"
 266#endif
 267
 268static int __die(const char *str, int err, struct pt_regs *regs)
 269{
 270        struct task_struct *tsk = current;
 271        static int die_counter;
 272        int ret;
 273
 274        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
 275                 str, err, ++die_counter);
 276
 277        /* trap and error numbers are mostly meaningless on ARM */
 278        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
 279        if (ret == NOTIFY_STOP)
 280                return 1;
 281
 282        print_modules();
 283        __show_regs(regs);
 284        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
 285                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));
 286
 287        if (!user_mode(regs) || in_interrupt()) {
 288                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
 289                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
 290                dump_backtrace(regs, tsk);
 291                dump_instr(KERN_EMERG, regs);
 292        }
 293
 294        return 0;
 295}
 296
 297static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
 298static int die_owner = -1;
 299static unsigned int die_nest_count;
 300
 301static unsigned long oops_begin(void)
 302{
 303        int cpu;
 304        unsigned long flags;
 305
 306        oops_enter();
 307
 308        /* racy, but better than risking deadlock. */
 309        raw_local_irq_save(flags);
 310        cpu = smp_processor_id();
 311        if (!arch_spin_trylock(&die_lock)) {
 312                if (cpu == die_owner)
 313                        /* nested oops. should stop eventually */;
 314                else
 315                        arch_spin_lock(&die_lock);
 316        }
 317        die_nest_count++;
 318        die_owner = cpu;
 319        console_verbose();
 320        bust_spinlocks(1);
 321        return flags;
 322}
 323
 324static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
 325{
 326        if (regs && kexec_should_crash(current))
 327                crash_kexec(regs);
 328
 329        bust_spinlocks(0);
 330        die_owner = -1;
 331        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
 332        die_nest_count--;
 333        if (!die_nest_count)
 334                /* Nest count reaches zero, release the lock. */
 335                arch_spin_unlock(&die_lock);
 336        raw_local_irq_restore(flags);
 337        oops_exit();
 338
 339        if (in_interrupt())
 340                panic("Fatal exception in interrupt");
 341        if (panic_on_oops)
 342                panic("Fatal exception");
 343        if (signr)
 344                do_exit(signr);
 345}
 346
 347/*
 348 * This function is protected against re-entrancy.
 349 */
 350void die(const char *str, struct pt_regs *regs, int err)
 351{
 352        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
 353        unsigned long flags = oops_begin();
 354        int sig = SIGSEGV;
 355
 356        if (!user_mode(regs))
 357                bug_type = report_bug(regs->ARM_pc, regs);
 358        if (bug_type != BUG_TRAP_TYPE_NONE)
 359                str = "Oops - BUG";
 360
 361        if (__die(str, err, regs))
 362                sig = 0;
 363
 364        oops_end(flags, regs, sig);
 365}
 366
 367void arm_notify_die(const char *str, struct pt_regs *regs,
 368                int signo, int si_code, void __user *addr,
 369                unsigned long err, unsigned long trap)
 370{
 371        if (user_mode(regs)) {
 372                current->thread.error_code = err;
 373                current->thread.trap_no = trap;
 374
 375                force_sig_fault(signo, si_code, addr, current);
 376        } else {
 377                die(str, regs, err);
 378        }
 379}
 380
 381#ifdef CONFIG_GENERIC_BUG
 382
 383int is_valid_bugaddr(unsigned long pc)
 384{
 385#ifdef CONFIG_THUMB2_KERNEL
 386        u16 bkpt;
 387        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
 388#else
 389        u32 bkpt;
 390        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
 391#endif
 392
 393        if (probe_kernel_address((unsigned *)pc, bkpt))
 394                return 0;
 395
 396        return bkpt == insn;
 397}
 398
 399#endif
 400
 401static LIST_HEAD(undef_hook);
 402static DEFINE_RAW_SPINLOCK(undef_lock);
 403
 404void register_undef_hook(struct undef_hook *hook)
 405{
 406        unsigned long flags;
 407
 408        raw_spin_lock_irqsave(&undef_lock, flags);
 409        list_add(&hook->node, &undef_hook);
 410        raw_spin_unlock_irqrestore(&undef_lock, flags);
 411}
 412
 413void unregister_undef_hook(struct undef_hook *hook)
 414{
 415        unsigned long flags;
 416
 417        raw_spin_lock_irqsave(&undef_lock, flags);
 418        list_del(&hook->node);
 419        raw_spin_unlock_irqrestore(&undef_lock, flags);
 420}
 421
 422static nokprobe_inline
 423int call_undef_hook(struct pt_regs *regs, unsigned int instr)
 424{
 425        struct undef_hook *hook;
 426        unsigned long flags;
 427        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;
 428
 429        raw_spin_lock_irqsave(&undef_lock, flags);
 430        list_for_each_entry(hook, &undef_hook, node)
 431                if ((instr & hook->instr_mask) == hook->instr_val &&
 432                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
 433                        fn = hook->fn;
 434        raw_spin_unlock_irqrestore(&undef_lock, flags);
 435
 436        return fn ? fn(regs, instr) : 1;
 437}
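
/*
 * Usage sketch (my_emulate/my_hook are hypothetical, for illustration only):
 * a subsystem that wants to trap or emulate one particular instruction fills
 * in an undef_hook whose mask/value pairs are matched above.  Returning 0
 * from ->fn means the instruction was handled; anything else falls through
 * to the SIGILL path in do_undefinstr().  arm_mrc_hook later in this file is
 * a real in-tree user.
 *
 *	static int my_emulate(struct pt_regs *regs, unsigned int instr)
 *	{
 *		decode instr and update regs here, then step over it:
 *		regs->ARM_pc += 4;
 *		return 0;
 *	}
 *
 *	static struct undef_hook my_hook = {
 *		.instr_mask	= 0x0fffffff,		(ignore the cond field)
 *		.instr_val	= <opcode to match>,
 *		.cpsr_mask	= PSR_T_BIT,		(ARM state only)
 *		.cpsr_val	= 0,
 *		.fn		= my_emulate,
 *	};
 *
 *	register_undef_hook(&my_hook);
 */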
 438
 439asmlinkage void do_undefinstr(struct pt_regs *regs)
 440{
 441        unsigned int instr;
 442        void __user *pc;
 443
 444        pc = (void __user *)instruction_pointer(regs);
 445
 446        if (processor_mode(regs) == SVC_MODE) {
 447#ifdef CONFIG_THUMB2_KERNEL
 448                if (thumb_mode(regs)) {
 449                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
 450                        if (is_wide_instruction(instr)) {
 451                                u16 inst2;
 452                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
 453                                instr = __opcode_thumb32_compose(instr, inst2);
 454                        }
 455                } else
 456#endif
 457                        instr = __mem_to_opcode_arm(*(u32 *) pc);
 458        } else if (thumb_mode(regs)) {
 459                if (get_user(instr, (u16 __user *)pc))
 460                        goto die_sig;
 461                instr = __mem_to_opcode_thumb16(instr);
 462                if (is_wide_instruction(instr)) {
 463                        unsigned int instr2;
 464                        if (get_user(instr2, (u16 __user *)pc+1))
 465                                goto die_sig;
 466                        instr2 = __mem_to_opcode_thumb16(instr2);
 467                        instr = __opcode_thumb32_compose(instr, instr2);
 468                }
 469        } else {
 470                if (get_user(instr, (u32 __user *)pc))
 471                        goto die_sig;
 472                instr = __mem_to_opcode_arm(instr);
 473        }
 474
 475        if (call_undef_hook(regs, instr) == 0)
 476                return;
 477
 478die_sig:
 479#ifdef CONFIG_DEBUG_USER
 480        if (user_debug & UDBG_UNDEFINED) {
 481                pr_info("%s (%d): undefined instruction: pc=%p\n",
 482                        current->comm, task_pid_nr(current), pc);
 483                __show_regs(regs);
 484                dump_instr(KERN_INFO, regs);
 485        }
 486#endif
 487        arm_notify_die("Oops - undefined instruction", regs,
 488                       SIGILL, ILL_ILLOPC, pc, 0, 6);
 489}
 490NOKPROBE_SYMBOL(do_undefinstr)
 491
 492/*
 493 * Handle FIQ similarly to NMI on x86 systems.
 494 *
 495 * The runtime environment for NMIs is extremely restrictive
  496 * (NMIs can pre-empt critical sections, so almost all locking is
  497 * forbidden), which means this default FIQ handling must only be used in
 498 * circumstances where non-maskability improves robustness, such as
 499 * watchdog or debug logic.
 500 *
  501 * This handler is not appropriate for general purpose use in drivers or
  502 * platform code and can be overridden using set_fiq_handler.
 503 */
 504asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
 505{
 506        struct pt_regs *old_regs = set_irq_regs(regs);
 507
 508        nmi_enter();
 509
 510        /* nop. FIQ handlers for special arch/arm features can be added here. */
 511
 512        nmi_exit();
 513
 514        set_irq_regs(old_regs);
 515}
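
/*
 * Usage note (rough sketch, not tied to any particular platform; the my_fiq_*
 * names are made up): code that genuinely needs a FIQ overrides this default
 * path with the helpers in arch/arm/kernel/fiq.c, along the lines of
 *
 *	if (claim_fiq(&my_fiq_handler) == 0)
 *		set_fiq_handler(&my_fiq_start,
 *				&my_fiq_end - &my_fiq_start);
 *
 * where my_fiq_start/my_fiq_end bracket a hand-written assembly handler and
 * my_fiq_handler is the struct fiq_handler identifying the owner.
 */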
 516
 517/*
 518 * bad_mode handles the impossible case in the vectors.  If you see one of
 519 * these, then it's extremely serious, and could mean you have buggy hardware.
 520 * It never returns, and never tries to sync.  We hope that we can at least
 521 * dump out some state information...
 522 */
 523asmlinkage void bad_mode(struct pt_regs *regs, int reason)
 524{
 525        console_verbose();
 526
 527        pr_crit("Bad mode in %s handler detected\n", handler[reason]);
 528
 529        die("Oops - bad mode", regs, 0);
 530        local_irq_disable();
 531        panic("bad mode");
 532}
 533
 534static int bad_syscall(int n, struct pt_regs *regs)
 535{
 536        if ((current->personality & PER_MASK) != PER_LINUX) {
 537                send_sig(SIGSEGV, current, 1);
 538                return regs->ARM_r0;
 539        }
 540
 541#ifdef CONFIG_DEBUG_USER
 542        if (user_debug & UDBG_SYSCALL) {
 543                pr_err("[%d] %s: obsolete system call %08x.\n",
 544                        task_pid_nr(current), current->comm, n);
 545                dump_instr(KERN_ERR, regs);
 546        }
 547#endif
 548
 549        arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
 550                       (void __user *)instruction_pointer(regs) -
 551                         (thumb_mode(regs) ? 2 : 4),
 552                       n, 0);
 553
 554        return regs->ARM_r0;
 555}
 556
 557static inline int
 558__do_cache_op(unsigned long start, unsigned long end)
 559{
 560        int ret;
 561
 562        do {
 563                unsigned long chunk = min(PAGE_SIZE, end - start);
 564
 565                if (fatal_signal_pending(current))
 566                        return 0;
 567
 568                ret = flush_cache_user_range(start, start + chunk);
 569                if (ret)
 570                        return ret;
 571
 572                cond_resched();
 573                start += chunk;
 574        } while (start < end);
 575
 576        return 0;
 577}
 578
 579static inline int
 580do_cache_op(unsigned long start, unsigned long end, int flags)
 581{
 582        if (end < start || flags)
 583                return -EINVAL;
 584
 585        if (!access_ok(start, end - start))
 586                return -EFAULT;
 587
 588        return __do_cache_op(start, end);
 589}
 590
 591/*
 592 * Handle all unrecognised system calls.
 593 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 594 */
 595#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
 596asmlinkage int arm_syscall(int no, struct pt_regs *regs)
 597{
 598        if ((no >> 16) != (__ARM_NR_BASE>> 16))
 599                return bad_syscall(no, regs);
 600
 601        switch (no & 0xffff) {
 602        case 0: /* branch through 0 */
 603                arm_notify_die("branch through zero", regs,
 604                               SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
 605                return 0;
 606
 607        case NR(breakpoint): /* SWI BREAK_POINT */
 608                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
 609                ptrace_break(current, regs);
 610                return regs->ARM_r0;
 611
 612        /*
 613         * Flush a region from virtual address 'r0' to virtual address 'r1'
 614         * _exclusive_.  There is no alignment requirement on either address;
 615         * user space does not need to know the hardware cache layout.
 616         *
 617         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
 618         * is defined to be something else.  For now we ignore it, but may
 619         * the fires of hell burn in your belly if you break this rule. ;)
 620         *
 621         * (at a later date, we may want to allow this call to not flush
 622         * various aspects of the cache.  Passing '0' will guarantee that
 623         * everything necessary gets flushed to maintain consistency in
 624         * the specified region).
 625         */
 626        case NR(cacheflush):
 627                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);
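
        /*
         * User-space usage sketch for the cacheflush call above (illustrative;
         * portable code would normally rely on the compiler's
         * __builtin___clear_cache() instead of calling this directly):
         *
         *	#include <unistd.h>
         *	#include <asm/unistd.h>
         *
         *	syscall(__ARM_NR_cacheflush, start, end, 0);
         *
         * with 'end' exclusive and the flags argument always zero, as
         * described in the comment above.
         */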
 628
 629        case NR(usr26):
 630                if (!(elf_hwcap & HWCAP_26BIT))
 631                        break;
 632                regs->ARM_cpsr &= ~MODE32_BIT;
 633                return regs->ARM_r0;
 634
 635        case NR(usr32):
 636                if (!(elf_hwcap & HWCAP_26BIT))
 637                        break;
 638                regs->ARM_cpsr |= MODE32_BIT;
 639                return regs->ARM_r0;
 640
 641        case NR(set_tls):
 642                set_tls(regs->ARM_r0);
 643                return 0;
 644
 645        case NR(get_tls):
 646                return current_thread_info()->tp_value[0];
 647
 648        default:
 649                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
 650                   if not implemented, rather than raising SIGILL.  This
 651                   way the calling program can gracefully determine whether
 652                   a feature is supported.  */
 653                if ((no & 0xffff) <= 0x7ff)
 654                        return -ENOSYS;
 655                break;
 656        }
 657#ifdef CONFIG_DEBUG_USER
 658        /*
 659         * experience shows that these seem to indicate that
 660         * something catastrophic has happened
 661         */
 662        if (user_debug & UDBG_SYSCALL) {
 663                pr_err("[%d] %s: arm syscall %d\n",
 664                       task_pid_nr(current), current->comm, no);
 665                dump_instr("", regs);
 666                if (user_mode(regs)) {
 667                        __show_regs(regs);
 668                        c_backtrace(frame_pointer(regs), processor_mode(regs));
 669                }
 670        }
 671#endif
 672        arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
 673                       (void __user *)instruction_pointer(regs) -
 674                         (thumb_mode(regs) ? 2 : 4),
 675                       no, 0);
 676        return 0;
 677}
 678
 679#ifdef CONFIG_TLS_REG_EMUL
 680
 681/*
 682 * We might be running on an ARMv6+ processor which should have the TLS
 683 * register but for some reason we can't use it, or maybe an SMP system
 684 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 685 * that in existence) and therefore access to that register must be
 686 * emulated.
 687 */
 688
 689static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
 690{
 691        int reg = (instr >> 12) & 15;
 692        if (reg == 15)
 693                return 1;
 694        regs->uregs[reg] = current_thread_info()->tp_value[0];
 695        regs->ARM_pc += 4;
 696        return 0;
 697}
 698
 699static struct undef_hook arm_mrc_hook = {
 700        .instr_mask     = 0x0fff0fff,
 701        .instr_val      = 0x0e1d0f70,
 702        .cpsr_mask      = PSR_T_BIT,
 703        .cpsr_val       = 0,
 704        .fn             = get_tp_trap,
 705};
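
/*
 * For reference (decoding the hook above): with the condition field and the
 * destination register masked off, 0x0e1d0f70 is the encoding of
 *
 *	mrc	p15, 0, <Rd>, c13, c0, 3	@ read the user r/o TLS register
 *
 * which is exactly the access that get_tp_trap() emulates; PSR_T_BIT with a
 * cpsr_val of 0 restricts the hook to ARM (non-Thumb) state.
 */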
 706
 707static int __init arm_mrc_hook_init(void)
 708{
 709        register_undef_hook(&arm_mrc_hook);
 710        return 0;
 711}
 712
 713late_initcall(arm_mrc_hook_init);
 714
 715#endif
 716
 717/*
 718 * A data abort trap was taken, but we did not handle the instruction.
 719 * Try to abort the user program, or panic if it was the kernel.
 720 */
 721asmlinkage void
 722baddataabort(int code, unsigned long instr, struct pt_regs *regs)
 723{
 724        unsigned long addr = instruction_pointer(regs);
 725
 726#ifdef CONFIG_DEBUG_USER
 727        if (user_debug & UDBG_BADABORT) {
 728                pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
 729                       task_pid_nr(current), current->comm, code, instr);
 730                dump_instr(KERN_ERR, regs);
 731                show_pte(current->mm, addr);
 732        }
 733#endif
 734
 735        arm_notify_die("unknown data abort code", regs,
 736                       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
 737}
 738
 739void __readwrite_bug(const char *fn)
 740{
 741        pr_err("%s called, but not implemented\n", fn);
 742        BUG();
 743}
 744EXPORT_SYMBOL(__readwrite_bug);
 745
 746void __pte_error(const char *file, int line, pte_t pte)
 747{
 748        pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
 749}
 750
 751void __pmd_error(const char *file, int line, pmd_t pmd)
 752{
 753        pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
 754}
 755
 756void __pgd_error(const char *file, int line, pgd_t pgd)
 757{
 758        pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
 759}
 760
 761asmlinkage void __div0(void)
 762{
 763        pr_err("Division by zero in kernel.\n");
 764        dump_stack();
 765}
 766EXPORT_SYMBOL(__div0);
 767
 768void abort(void)
 769{
 770        BUG();
 771
 772        /* if that doesn't kill us, halt */
 773        panic("Oops failed to kill thread");
 774}
 775
 776void __init trap_init(void)
 777{
 778        return;
 779}
 780
 781#ifdef CONFIG_KUSER_HELPERS
 782static void __init kuser_init(void *vectors)
 783{
 784        extern char __kuser_helper_start[], __kuser_helper_end[];
 785        int kuser_sz = __kuser_helper_end - __kuser_helper_start;
 786
 787        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);
 788
 789        /*
 790         * vectors + 0xfe0 = __kuser_get_tls
 791         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
 792         */
 793        if (tls_emu || has_tls_reg)
 794                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
 795}
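
/*
 * User-space usage sketch (illustrative): the helper copied to vectors + 0xfe0
 * ends up at the fixed address 0xffff0fe0, so a process can read its TLS
 * value with something like
 *
 *	typedef unsigned long (*kuser_get_tls_t)(void);
 *	unsigned long tls = ((kuser_get_tls_t)0xffff0fe0)();
 *
 * see the kuser helpers document under Documentation/arm/ for the canonical
 * interface description.
 */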
 796#else
 797static inline void __init kuser_init(void *vectors)
 798{
 799}
 800#endif
 801
 802void __init early_trap_init(void *vectors_base)
 803{
 804#ifndef CONFIG_CPU_V7M
 805        unsigned long vectors = (unsigned long)vectors_base;
 806        extern char __stubs_start[], __stubs_end[];
 807        extern char __vectors_start[], __vectors_end[];
 808        unsigned i;
 809
 810        vectors_page = vectors_base;
 811
 812        /*
 813         * Poison the vectors page with an undefined instruction.  This
 814         * instruction is chosen to be undefined for both ARM and Thumb
 815         * ISAs.  The Thumb version is an undefined instruction with a
 816         * branch back to the undefined instruction.
 817         */
 818        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
 819                ((u32 *)vectors_base)[i] = 0xe7fddef1;
 820
 821        /*
 822         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
 823         * into the vector page, mapped at 0xffff0000, and ensure these
 824         * are visible to the instruction stream.
 825         */
 826        memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
 827        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);
 828
 829        kuser_init(vectors_base);
 830
 831        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
 832#else /* ifndef CONFIG_CPU_V7M */
 833        /*
  834         * On V7-M there is no need to copy the vector table to a dedicated
 835         * memory area. The address is configurable and so a table in the kernel
 836         * image can be used.
 837         */
 838#endif
 839}
 840