linux/arch/arm/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/kprobes.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/irq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>

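/*
 * Human-readable names for the exception classes reported by the vector
 * stubs, indexed by the 'reason' code that bad_mode() receives.
 */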
static const char *handler[] = {
        "prefetch abort",
        "data abort",
        "address exception",
        "interrupt",
        "undefined instruction",
};

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
        get_option(&str, &user_debug);
        return 1;
}
__setup("user_debug=", user_debug_setup);
#endif

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

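/*
 * Print a single backtrace entry: 'where' is the address within the
 * function, 'from' the caller's return address and 'frame' the frame
 * pointer.  If 'from' lies in the exception entry code, the pt_regs
 * saved on the stack are dumped as well.
 */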
void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
        printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

        if (in_entry_text(from))
                dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}

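/*
 * Print the registers saved by a store-multiple instruction found while
 * backtracing: every register whose bit is set in the STM register list
 * is popped off 'stack' and printed, six per line.
 */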
void dump_backtrace_stm(u32 *stack, u32 instruction)
{
        char str[80], *p;
        unsigned int x;
        int reg;

        for (reg = 10, x = 0, p = str; reg >= 0; reg--) {
                if (instruction & BIT(reg)) {
                        p += sprintf(p, " r%d:%08x", reg, *stack--);
                        if (++x == 6) {
                                x = 0;
                                p = str;
                                printk("%s\n", str);
                        }
                }
        }
        if (p != str)
                printk("%s\n", str);
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
        if (sp < PAGE_OFFSET ||
            (sp > (unsigned long)high_memory && high_memory != NULL))
                return -EFAULT;

        return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
                     unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}

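/*
 * Dump the instruction stream around the faulting PC: the four
 * instructions preceding it plus the faulting one (shown in
 * parentheses), and one further halfword in Thumb mode.
 */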
static void __dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /*
         * Note that we now dump the code first, just in case the backtrace
         * kills us.
         */

        for (i = -4; i < 1 + !!thumb; i++) {
                unsigned int val, bad;

                if (thumb)
                        bad = get_user(val, &((u16 *)addr)[i]);
                else
                        bad = get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
                                        width, val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);
}

static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        mm_segment_t fs;

        if (!user_mode(regs)) {
                fs = get_fs();
                set_fs(KERNEL_DS);
                __dump_instr(lvl, regs);
                set_fs(fs);
        } else {
                __dump_instr(lvl, regs);
        }
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unwind_backtrace(regs, tsk);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unsigned int fp, mode;
        int ok = 1;

        printk("Backtrace: ");

        if (!tsk)
                tsk = current;

        if (regs) {
                fp = frame_pointer(regs);
                mode = processor_mode(regs);
        } else if (tsk != current) {
                fp = thread_saved_fp(tsk);
                mode = 0x10;
        } else {
                asm("mov %0, fp" : "=r" (fp) : : "cc");
                mode = 0x10;
        }

        if (!fp) {
                pr_cont("no frame pointer");
                ok = 0;
        } else if (verify_stack(fp)) {
                pr_cont("invalid frame pointer 0x%08x", fp);
                ok = 0;
        } else if (fp < (unsigned long)end_of_stack(tsk))
                pr_cont("frame pointer underflow");
        pr_cont("\n");

        if (ok)
                c_backtrace(fp, mode);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif

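/*
 * Print the "Internal error" banner, module list, registers, stack and
 * backtrace for a fatal fault.  Returns non-zero if a die notifier
 * returned NOTIFY_STOP, in which case die() will not kill the task.
 */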
static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return 1;

        print_modules();
        __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
                dump_instr(KERN_EMERG, regs);
        }

        return 0;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

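/*
 * Serialise oops output: interrupts are disabled and the die lock is
 * taken (a CPU that already owns it is allowed through so a nested
 * oops can still be printed).  The saved IRQ flags are returned for
 * oops_end() to restore.
 */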
static unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (signr)
                do_exit(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        if (!user_mode(regs))
                bug_type = report_bug(regs->ARM_pc, regs);
        if (bug_type != BUG_TRAP_TYPE_NONE)
                str = "Oops - BUG";

        if (__die(str, err, regs))
                sig = 0;

        oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
                int signo, int si_code, void __user *addr,
                unsigned long err, unsigned long trap)
{
        if (user_mode(regs)) {
                current->thread.error_code = err;
                current->thread.trap_no = trap;

                force_sig_fault(signo, si_code, addr);
        } else {
                die(str, regs, err);
        }
}

#ifdef CONFIG_GENERIC_BUG

int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
        u16 bkpt;
        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
        u32 bkpt;
        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

        if (probe_kernel_address((unsigned *)pc, bkpt))
                return 0;

        return bkpt == insn;
}

#endif

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

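/*
 * Undefined-instruction hooks.  Code that wants to emulate or intercept
 * otherwise-undefined instructions registers a struct undef_hook giving
 * the instruction and CPSR bits it cares about; do_undefinstr() offers
 * the trapped instruction to a matching hook before raising SIGILL.
 *
 * Purely illustrative sketch (the handler, mask and encoding below are
 * made up for this comment and do not correspond to a real user):
 *
 *      static int my_handler(struct pt_regs *regs, unsigned int instr)
 *      {
 *              regs->ARM_pc += 4;      // skip the trapped instruction
 *              return 0;               // 0 = handled, non-zero = not ours
 *      }
 *
 *      static struct undef_hook my_hook = {
 *              .instr_mask     = 0x0fffffff,
 *              .instr_val      = 0x07f000f0,   // hypothetical encoding
 *              .cpsr_mask      = MODE_MASK,
 *              .cpsr_val       = USR_MODE,
 *              .fn             = my_handler,
 *      };
 *
 *      register_undef_hook(&my_hook);
 */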
void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static nokprobe_inline
int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
        struct undef_hook *hook;
        unsigned long flags;
        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
                        fn = hook->fn;
        raw_spin_unlock_irqrestore(&undef_lock, flags);

        return fn ? fn(regs, instr) : 1;
}

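/*
 * Entry point for the undefined-instruction exception.  The faulting
 * instruction is fetched (kernel or user, ARM or Thumb) and offered to
 * the registered undef hooks; if no hook handles it, the task receives
 * a SIGILL or, for a kernel fault, we die().
 */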
asmlinkage void do_undefinstr(struct pt_regs *regs)
{
        unsigned int instr;
        void __user *pc;

        pc = (void __user *)instruction_pointer(regs);

        if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
                if (thumb_mode(regs)) {
                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
                        if (is_wide_instruction(instr)) {
                                u16 inst2;
                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
                                instr = __opcode_thumb32_compose(instr, inst2);
                        }
                } else
#endif
                        instr = __mem_to_opcode_arm(*(u32 *) pc);
        } else if (thumb_mode(regs)) {
                if (get_user(instr, (u16 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_thumb16(instr);
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
                        if (get_user(instr2, (u16 __user *)pc+1))
                                goto die_sig;
                        instr2 = __mem_to_opcode_thumb16(instr2);
                        instr = __opcode_thumb32_compose(instr, instr2);
                }
        } else {
                if (get_user(instr, (u32 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_arm(instr);
        }

        if (call_undef_hook(regs, instr) == 0)
                return;

die_sig:
#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                pr_info("%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                __show_regs(regs);
                dump_instr(KERN_INFO, regs);
        }
#endif
        arm_notify_die("Oops - undefined instruction", regs,
                       SIGILL, ILL_ILLOPC, pc, 0, 6);
}
NOKPROBE_SYMBOL(do_undefinstr)

/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections meaning almost all locking is
 * forbidden) meaning this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in drivers
 * or platform code and can be overridden using set_fiq_handler.
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        nmi_enter();

        /* nop. FIQ handlers for special arch/arm features can be added here. */

        nmi_exit();

        set_irq_regs(old_regs);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected\n", handler[reason]);

        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
}

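/*
 * An out-of-range software interrupt was issued.  Tasks running under a
 * non-Linux personality simply get SIGSEGV; otherwise the obsolete call
 * is reported and SIGILL is delivered via arm_notify_die().
 */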
static int bad_syscall(int n, struct pt_regs *regs)
{
        if ((current->personality & PER_MASK) != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->ARM_r0;
        }

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: obsolete system call %08x.\n",
                        task_pid_nr(current), current->comm, n);
                dump_instr(KERN_ERR, regs);
        }
#endif

        arm_notify_die("Oops - bad syscall", regs, SIGILL, ILL_ILLTRP,
                       (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4),
                       n, 0);

        return regs->ARM_r0;
}

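/*
 * Flush the user address range one page at a time so that pending fatal
 * signals and the need to reschedule are noticed between chunks.
 */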
static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
        int ret;

        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);

                if (fatal_signal_pending(current))
                        return 0;

                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
                        return ret;

                cond_resched();
                start += chunk;
        } while (start < end);

        return 0;
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
        if (end < start || flags)
                return -EINVAL;

        if (!access_ok(start, end - start))
                return -EFAULT;

        return __do_cache_op(start, end);
}

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
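/*
 * Illustrative only: user space would typically reach the cacheflush
 * operation handled below with something like
 *
 *      syscall(__ARM_NR_cacheflush, start, end, 0);
 *
 * where the third argument (flags, in r2) must be zero, as explained in
 * the comment above the NR(cacheflush) case.
 */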
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
        if ((no >> 16) != (__ARM_NR_BASE >> 16))
                return bad_syscall(no, regs);

        switch (no & 0xffff) {
        case 0: /* branch through 0 */
                arm_notify_die("branch through zero", regs,
                               SIGSEGV, SEGV_MAPERR, NULL, 0, 0);
                return 0;

        case NR(breakpoint): /* SWI BREAK_POINT */
                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
                ptrace_break(regs);
                return regs->ARM_r0;

        /*
         * Flush a region from virtual address 'r0' to virtual address 'r1'
         * _exclusive_.  There is no alignment requirement on either address;
         * user space does not need to know the hardware cache layout.
         *
         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
         * is defined to be something else.  For now we ignore it, but may
         * the fires of hell burn in your belly if you break this rule. ;)
         *
         * (at a later date, we may want to allow this call to not flush
         * various aspects of the cache.  Passing '0' will guarantee that
         * everything necessary gets flushed to maintain consistency in
         * the specified region).
         */
        case NR(cacheflush):
                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr &= ~MODE32_BIT;
                return regs->ARM_r0;

        case NR(usr32):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr |= MODE32_BIT;
                return regs->ARM_r0;

        case NR(set_tls):
                set_tls(regs->ARM_r0);
                return 0;

        case NR(get_tls):
                return current_thread_info()->tp_value[0];

        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
                   way the calling program can gracefully determine whether
                   a feature is supported.  */
                if ((no & 0xffff) <= 0x7ff)
                        return -ENOSYS;
                break;
        }
#ifdef CONFIG_DEBUG_USER
        /*
         * experience shows that these seem to indicate that
         * something catastrophic has happened
         */
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: arm syscall %d\n",
                       task_pid_nr(current), current->comm, no);
                dump_instr("", regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(frame_pointer(regs), processor_mode(regs));
                }
        }
#endif
        arm_notify_die("Oops - bad syscall(2)", regs, SIGILL, ILL_ILLTRP,
                       (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4),
                       no, 0);
        return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value[0];
        regs->ARM_pc += 4;
        return 0;
}

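/*
 * Trap "mrc p15, 0, rX, c13, c0, 3" (read the user read-only TLS
 * register) and emulate it using the value saved in thread_info,
 * stepping the PC over the trapped instruction.
 */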
static struct undef_hook arm_mrc_hook = {
        .instr_mask     = 0x0fff0fff,
        .instr_val      = 0x0e1d0f70,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = 0,
        .fn             = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
        register_undef_hook(&arm_mrc_hook);
        return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_BADABORT) {
                pr_err("8<--- cut here ---\n");
                pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                       task_pid_nr(current), current->comm, code, instr);
                dump_instr(KERN_ERR, regs);
                show_pte(KERN_ERR, current->mm, addr);
        }
#endif

        arm_notify_die("unknown data abort code", regs,
                       SIGILL, ILL_ILLOPC, (void __user *)addr, instr, 0);
}

void __readwrite_bug(const char *fn)
{
        pr_err("%s called, but not implemented\n", fn);
        BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
        pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
        pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
        pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
        pr_err("Division by zero in kernel.\n");
        dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}

void __init trap_init(void)
{
        return;
}

#ifdef CONFIG_KUSER_HELPERS
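/*
 * Copy the kuser helpers to the top of the vectors page so that they end
 * at vectors + 0x1000 (0xffff1000), and, when a hardware TLS register or
 * TLS emulation is available, patch __kuser_get_tls to use the hardware
 * sequence at vectors + 0xfe8.
 */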
static void __init kuser_init(void *vectors)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;

        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

        /*
         * vectors + 0xfe0 = __kuser_get_tls
         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
         */
        if (tls_emu || has_tls_reg)
                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

void __init early_trap_init(void *vectors_base)
{
#ifndef CONFIG_CPU_V7M
        unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;

        vectors_page = vectors_base;

        /*
         * Poison the vectors page with an undefined instruction.  This
         * instruction is chosen to be undefined for both ARM and Thumb
         * ISAs.  The Thumb version is an undefined instruction with a
         * branch back to the undefined instruction.
         */
        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                ((u32 *)vectors_base)[i] = 0xe7fddef1;

        /*
         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
        memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);

        kuser_init(vectors_base);

        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * On V7-M there is no need to copy the vector table to a dedicated
         * memory area.  The address is configurable and so a table in the
         * kernel image can be used.
         */
#endif
}