linux/arch/arm/kernel/traps.c
/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/irq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>

static const char *handler[] = {
        "prefetch abort",
        "data abort",
        "address exception",
        "interrupt",
        "undefined instruction",
};

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
        get_option(&str, &user_debug);
        return 1;
}
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
        printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

        if (in_exception_text(where))
                dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}

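/*
 * Print the registers saved by a store-multiple (stm) instruction while
 * walking a backtrace: the register list is decoded from the instruction
 * bits and the values are read back from the stack, at most six per
 * output line.
 */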
void dump_backtrace_stm(u32 *stack, u32 instruction)
{
        char str[80], *p;
        unsigned int x;
        int reg;

        for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
                if (instruction & BIT(reg)) {
                        p += sprintf(p, " r%d:%08x", reg, *stack--);
                        if (++x == 6) {
                                x = 0;
                                p = str;
                                printk("%s\n", str);
                        }
                }
        }
        if (p != str)
                printk("%s\n", str);
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If not, we can't dump out any information
 * relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
        if (sp < PAGE_OFFSET ||
            (sp > (unsigned long)high_memory && high_memory != NULL))
                return -EFAULT;

        return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
                     unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /*
         * Note that we now dump the code first, just in case the backtrace
         * kills us.
         */

        for (i = -4; i < 1 + !!thumb; i++) {
                unsigned int val, bad;

                if (thumb)
                        bad = get_user(val, &((u16 *)addr)[i]);
                else
                        bad = get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
                                        width, val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);
}

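/*
 * Dump the code around the faulting instruction.  For kernel-mode faults
 * we temporarily switch to KERNEL_DS so that get_user() in __dump_instr()
 * can read kernel addresses; user-mode faults are dumped as-is.
 */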
static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        mm_segment_t fs;

        if (!user_mode(regs)) {
                fs = get_fs();
                set_fs(KERNEL_DS);
                __dump_instr(lvl, regs);
                set_fs(fs);
        } else {
                __dump_instr(lvl, regs);
        }
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unwind_backtrace(regs, tsk);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unsigned int fp, mode;
        int ok = 1;

        printk("Backtrace: ");

        if (!tsk)
                tsk = current;

        if (regs) {
                fp = frame_pointer(regs);
                mode = processor_mode(regs);
        } else if (tsk != current) {
                fp = thread_saved_fp(tsk);
                mode = 0x10;
        } else {
                asm("mov %0, fp" : "=r" (fp) : : "cc");
                mode = 0x10;
        }

        if (!fp) {
                pr_cont("no frame pointer");
                ok = 0;
        } else if (verify_stack(fp)) {
                pr_cont("invalid frame pointer 0x%08x", fp);
                ok = 0;
        } else if (fp < (unsigned long)end_of_stack(tsk))
                pr_cont("frame pointer underflow");
        pr_cont("\n");

        if (ok)
                c_backtrace(fp, mode);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif

static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return 1;

        print_modules();
        __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
                dump_instr(KERN_EMERG, regs);
        }

        return 0;
}

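/*
 * Oops output is serialised on die_lock.  The lock is taken with a trylock
 * first so that a CPU which oopses again while already holding the lock
 * (a nested oops) does not deadlock on itself; die_nest_count tracks the
 * nesting depth and the lock is only released when it drops back to zero.
 */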
static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

static unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (signr)
                do_exit(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        if (!user_mode(regs))
                bug_type = report_bug(regs->ARM_pc, regs);
        if (bug_type != BUG_TRAP_TYPE_NONE)
                str = "Oops - BUG";

        if (__die(str, err, regs))
                sig = 0;

        oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
                struct siginfo *info, unsigned long err, unsigned long trap)
{
        if (user_mode(regs)) {
                current->thread.error_code = err;
                current->thread.trap_no = trap;

                force_sig_info(info->si_signo, info, current);
        } else {
                die(str, regs, err);
        }
}

#ifdef CONFIG_GENERIC_BUG

int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
        u16 bkpt;
        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
        u32 bkpt;
        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

        if (probe_kernel_address((unsigned *)pc, bkpt))
                return 0;

        return bkpt == insn;
}

#endif

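/*
 * Undefined instruction hooks.  Other code (for example the TLS register
 * emulation further down this file) can register a hook that matches an
 * undefined instruction by value/mask and by CPSR state; call_undef_hook()
 * gives a matching hook the chance to handle the instruction before
 * do_undefinstr() falls back to raising SIGILL.
 */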
static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
        struct undef_hook *hook;
        unsigned long flags;
        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
                        fn = hook->fn;
        raw_spin_unlock_irqrestore(&undef_lock, flags);

        return fn ? fn(regs, instr) : 1;
}

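/*
 * A minimal sketch of how a hook might be defined and registered, using a
 * hypothetical handler name; compare the real arm_mrc_hook used for TLS
 * emulation further down this file.  Guarded out so it never builds.
 */
#if 0
static int example_undef_handler(struct pt_regs *regs, unsigned int instr)
{
        /* Emulate or fix up the instruction, then step past it. */
        regs->ARM_pc += 4;
        return 0;                       /* 0 = handled, non-zero = SIGILL */
}

static struct undef_hook example_hook = {
        .instr_mask     = 0xffffffff,   /* match one exact ARM encoding */
        .instr_val      = 0xe7f000f0,   /* "udf #0", purely illustrative */
        .cpsr_mask      = PSR_T_BIT,    /* only in ARM (not Thumb) state */
        .cpsr_val       = 0,
        .fn             = example_undef_handler,
};

static int __init example_hook_init(void)
{
        register_undef_hook(&example_hook);
        return 0;
}
late_initcall(example_hook_init);
#endif
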
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        unsigned int instr;
        siginfo_t info;
        void __user *pc;

        pc = (void __user *)instruction_pointer(regs);

        if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
                if (thumb_mode(regs)) {
                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
                        if (is_wide_instruction(instr)) {
                                u16 inst2;
                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
                                instr = __opcode_thumb32_compose(instr, inst2);
                        }
                } else
#endif
                        instr = __mem_to_opcode_arm(*(u32 *) pc);
        } else if (thumb_mode(regs)) {
                if (get_user(instr, (u16 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_thumb16(instr);
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
                        if (get_user(instr2, (u16 __user *)pc+1))
                                goto die_sig;
                        instr2 = __mem_to_opcode_thumb16(instr2);
                        instr = __opcode_thumb32_compose(instr, instr2);
                }
        } else {
                if (get_user(instr, (u32 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_arm(instr);
        }

        if (call_undef_hook(regs, instr) == 0)
                return;

die_sig:
#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                pr_info("%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                __show_regs(regs);
                dump_instr(KERN_INFO, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;

        arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}

/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections, meaning almost all locking is
 * forbidden), so this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in drivers or
 * platform code and can be overridden using set_fiq_handler.
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        nmi_enter();

        /* nop. FIQ handlers for special arch/arm features can be added here. */

        nmi_exit();

        set_irq_regs(old_regs);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected\n", handler[reason]);

        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
}

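/*
 * Reached from arm_syscall() for SWIs outside the ARM-private range.
 * Non-Linux personalities simply get SIGSEGV; otherwise the call is logged
 * as obsolete (when user_debug enables syscall reporting) and SIGILL is
 * raised via arm_notify_die().
 */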
static int bad_syscall(int n, struct pt_regs *regs)
{
        siginfo_t info;

        if ((current->personality & PER_MASK) != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->ARM_r0;
        }

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: obsolete system call %08x.\n",
                        task_pid_nr(current), current->comm, n);
                dump_instr(KERN_ERR, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

        return regs->ARM_r0;
}

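/*
 * Flush the cache for a user address range on behalf of the cacheflush
 * syscall, working a page at a time so that fatal signals are honoured
 * and the scheduler gets a chance to run between chunks.
 */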
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
        int ret;

        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);

                if (fatal_signal_pending(current))
                        return 0;

                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
                        return ret;

                cond_resched();
                start += chunk;
        } while (start < end);

        return 0;
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
        if (end < start || flags)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, start, end - start))
                return -EFAULT;

        return __do_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
        siginfo_t info;

        if ((no >> 16) != (__ARM_NR_BASE >> 16))
                return bad_syscall(no, regs);

        switch (no & 0xffff) {
        case 0: /* branch through 0 */
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code  = SEGV_MAPERR;
                info.si_addr  = NULL;

                arm_notify_die("branch through zero", regs, &info, 0, 0);
                return 0;

        case NR(breakpoint): /* SWI BREAK_POINT */
                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
                ptrace_break(current, regs);
                return regs->ARM_r0;

        /*
         * Flush a region from virtual address 'r0' to virtual address 'r1'
         * _exclusive_.  There is no alignment requirement on either address;
         * user space does not need to know the hardware cache layout.
         *
         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
         * is defined to be something else.  For now we ignore it, but may
         * the fires of hell burn in your belly if you break this rule. ;)
         *
         * (at a later date, we may want to allow this call to not flush
         * various aspects of the cache.  Passing '0' will guarantee that
         * everything necessary gets flushed to maintain consistency in
         * the specified region).
         */
        case NR(cacheflush):
                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr &= ~MODE32_BIT;
                return regs->ARM_r0;

        case NR(usr32):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr |= MODE32_BIT;
                return regs->ARM_r0;

        case NR(set_tls):
                set_tls(regs->ARM_r0);
                return 0;

        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
                   way the calling program can gracefully determine whether
                   a feature is supported.  */
                if ((no & 0xffff) <= 0x7ff)
                        return -ENOSYS;
                break;
        }
#ifdef CONFIG_DEBUG_USER
        /*
         * experience shows that these seem to indicate that
         * something catastrophic has happened
         */
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: arm syscall %d\n",
                       task_pid_nr(current), current->comm, no);
                dump_instr("", regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(frame_pointer(regs), processor_mode(regs));
                }
        }
#endif
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
        return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

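/*
 * Trap handler for the "mrc p15, 0, rX, c13, c0, 3" TLS read: decode the
 * destination register from bits 15:12, supply the saved TLS value and
 * step the PC past the trapped instruction.
 */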
static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value[0];
        regs->ARM_pc += 4;
        return 0;
}

static struct undef_hook arm_mrc_hook = {
        .instr_mask     = 0x0fff0fff,
        .instr_val      = 0x0e1d0f70,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = 0,
        .fn             = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
        register_undef_hook(&arm_mrc_hook);
        return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        siginfo_t info;

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_BADABORT) {
                pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                       task_pid_nr(current), current->comm, code, instr);
                dump_instr(KERN_ERR, regs);
                show_pte(current->mm, addr);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = (void __user *)addr;

        arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}

void __readwrite_bug(const char *fn)
{
        pr_err("%s called, but not implemented\n", fn);
        BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
        pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
        pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
        pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
        pr_err("Division by zero in kernel.\n");
        dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

void __init trap_init(void)
{
        return;
}

#ifdef CONFIG_KUSER_HELPERS
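/*
 * Copy the kuser helpers to the top of the vectors page, which is mapped
 * at 0xffff0000, so they end up at their fixed user-visible addresses
 * just below 0xffff1000.
 */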
static void __init kuser_init(void *vectors)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;

        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

        /*
         * vectors + 0xfe0 = __kuser_get_tls
         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
         */
        if (tls_emu || has_tls_reg)
                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

void __init early_trap_init(void *vectors_base)
{
#ifndef CONFIG_CPU_V7M
        unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;

        vectors_page = vectors_base;

        /*
         * Poison the vectors page with an undefined instruction.  This
         * instruction is chosen to be undefined for both ARM and Thumb
         * ISAs.  The Thumb version is an undefined instruction with a
         * branch back to the undefined instruction.
         */
        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                ((u32 *)vectors_base)[i] = 0xe7fddef1;

        /*
         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
        memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);

        kuser_init(vectors_base);

        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * On V7-M there is no need to copy the vector table to a dedicated
         * memory area. The address is configurable and so a table in the kernel
         * image can be used.
         */
#endif
}