linux/arch/arm/kernel/traps.c
/*
 *  linux/arch/arm/kernel/traps.c
 *
 *  Copyright (C) 1995-2009 Russell King
 *  Fragments that appear the same as linux/arch/i386/kernel/traps.c (C) Linus Torvalds
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  'traps.c' handles hardware exceptions after we have saved some state in
 *  'linux/arch/arm/lib/traps.S'.  Mostly a debugging aid, but will probably
 *  kill the offending process.
 */
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/bug.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/irq.h>

#include <linux/atomic.h>
#include <asm/cacheflush.h>
#include <asm/exception.h>
#include <asm/unistd.h>
#include <asm/traps.h>
#include <asm/ptrace.h>
#include <asm/unwind.h>
#include <asm/tls.h>
#include <asm/system_misc.h>
#include <asm/opcodes.h>


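/* Exception names, indexed by the "reason" code passed to bad_mode() below. */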
static const char *handler[]= {
        "prefetch abort",
        "data abort",
        "address exception",
        "interrupt",
        "undefined instruction",
};

void *vectors_page;

#ifdef CONFIG_DEBUG_USER
unsigned int user_debug;

static int __init user_debug_setup(char *str)
{
        get_option(&str, &user_debug);
        return 1;
}
__setup("user_debug=", user_debug_setup);
#endif
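
/*
 * user_debug is normally set from the kernel command line, e.g.
 * "user_debug=31" typically enables all of the UDBG_* report classes used
 * below at once; the individual bit values live in <asm/system_misc.h>.
 */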

static void dump_mem(const char *, const char *, unsigned long, unsigned long);

void dump_backtrace_entry(unsigned long where, unsigned long from, unsigned long frame)
{
#ifdef CONFIG_KALLSYMS
        printk("[<%08lx>] (%ps) from [<%08lx>] (%pS)\n", where, (void *)where, from, (void *)from);
#else
        printk("Function entered at [<%08lx>] from [<%08lx>]\n", where, from);
#endif

        if (in_exception_text(where))
                dump_mem("", "Exception stack", frame + 4, frame + 4 + sizeof(struct pt_regs));
}

#ifndef CONFIG_ARM_UNWIND
/*
 * Stack pointers should always be within the kernel's view of
 * physical memory.  If it is not there, then we can't dump
 * out any information relating to the stack.
 */
static int verify_stack(unsigned long sp)
{
        if (sp < PAGE_OFFSET ||
            (sp > (unsigned long)high_memory && high_memory != NULL))
                return -EFAULT;

        return 0;
}
#endif

/*
 * Dump out the contents of some memory nicely...
 */
static void dump_mem(const char *lvl, const char *str, unsigned long bottom,
                     unsigned long top)
{
        unsigned long first;
        mm_segment_t fs;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        printk("%s%s(0x%08lx to 0x%08lx)\n", lvl, str, bottom, top);

        for (first = bottom & ~31; first < top; first += 32) {
                unsigned long p;
                char str[sizeof(" 12345678") * 8 + 1];

                memset(str, ' ', sizeof(str));
                str[sizeof(str) - 1] = '\0';

                for (p = first, i = 0; i < 8 && p < top; i++, p += 4) {
                        if (p >= bottom && p < top) {
                                unsigned long val;
                                if (__get_user(val, (unsigned long *)p) == 0)
                                        sprintf(str + i * 9, " %08lx", val);
                                else
                                        sprintf(str + i * 9, " ????????");
                        }
                }
                printk("%s%04lx:%s\n", lvl, first & 0xffff, str);
        }

        set_fs(fs);
}
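
/*
 * For reference, dump_mem() output looks roughly like this (illustrative
 * addresses and values only, derived from the format strings above):
 *
 *      Stack: (0xc7899e88 to 0xc789a000)
 *      9e80:                    00000000 c02a1e40 c7899ec4 00000001 60000013 c7898000
 *      9ea0:  c7899ec4 00000000 c03f62d8 c0016a00 00000000 c7899f0c c000d8a4 c7899ee4
 *
 * Each row starts with the low 16 bits of the address ("%04lx" above) and
 * word slots outside the requested [bottom, top) range are left blank.
 */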

static void dump_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        const int thumb = thumb_mode(regs);
        const int width = thumb ? 4 : 8;
        mm_segment_t fs;
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        /*
         * We need to switch to kernel mode so that we can use __get_user
         * to safely read from kernel space.  Note that we now dump the
         * code first, just in case the backtrace kills us.
         */
        fs = get_fs();
        set_fs(KERNEL_DS);

        for (i = -4; i < 1 + !!thumb; i++) {
                unsigned int val, bad;

                if (thumb)
                        bad = __get_user(val, &((u16 *)addr)[i]);
                else
                        bad = __get_user(val, &((u32 *)addr)[i]);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%0*x) " : "%0*x ",
                                        width, val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }
        printk("%sCode: %s\n", lvl, str);

        set_fs(fs);
}

#ifdef CONFIG_ARM_UNWIND
static inline void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unwind_backtrace(regs, tsk);
}
#else
static void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        unsigned int fp, mode;
        int ok = 1;

        printk("Backtrace: ");

        if (!tsk)
                tsk = current;

        if (regs) {
                fp = frame_pointer(regs);
                mode = processor_mode(regs);
        } else if (tsk != current) {
                fp = thread_saved_fp(tsk);
                mode = 0x10;
        } else {
                asm("mov %0, fp" : "=r" (fp) : : "cc");
                mode = 0x10;
        }

        if (!fp) {
                pr_cont("no frame pointer");
                ok = 0;
        } else if (verify_stack(fp)) {
                pr_cont("invalid frame pointer 0x%08x", fp);
                ok = 0;
        } else if (fp < (unsigned long)end_of_stack(tsk))
                pr_cont("frame pointer underflow");
        pr_cont("\n");

        if (ok)
                c_backtrace(fp, mode);
}
#endif

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#ifdef CONFIG_SMP
#define S_SMP " SMP"
#else
#define S_SMP ""
#endif
#ifdef CONFIG_THUMB2_KERNEL
#define S_ISA " THUMB2"
#else
#define S_ISA " ARM"
#endif

static int __die(const char *str, int err, struct pt_regs *regs)
{
        struct task_struct *tsk = current;
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP S_ISA "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, tsk->thread.trap_no, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return 1;

        print_modules();
        __show_regs(regs);
        pr_emerg("Process %.*s (pid: %d, stack limit = 0x%p)\n",
                 TASK_COMM_LEN, tsk->comm, task_pid_nr(tsk), end_of_stack(tsk));

        if (!user_mode(regs) || in_interrupt()) {
                dump_mem(KERN_EMERG, "Stack: ", regs->ARM_sp,
                         THREAD_SIZE + (unsigned long)task_stack_page(tsk));
                dump_backtrace(regs, tsk);
                dump_instr(KERN_EMERG, regs);
        }

        return 0;
}

static arch_spinlock_t die_lock = __ARCH_SPIN_LOCK_UNLOCKED;
static int die_owner = -1;
static unsigned int die_nest_count;

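/*
 * oops_begin()/oops_end() serialise oops output across CPUs with die_lock.
 * A CPU that oopses again while it already owns the lock (a nested oops)
 * is let through rather than deadlocking; die_owner and die_nest_count
 * track that so the lock is only released once the outermost oops is done.
 */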
static unsigned long oops_begin(void)
{
        int cpu;
        unsigned long flags;

        oops_enter();

        /* racy, but better than risking deadlock. */
        raw_local_irq_save(flags);
        cpu = smp_processor_id();
        if (!arch_spin_trylock(&die_lock)) {
                if (cpu == die_owner)
                        /* nested oops. should stop eventually */;
                else
                        arch_spin_lock(&die_lock);
        }
        die_nest_count++;
        die_owner = cpu;
        console_verbose();
        bust_spinlocks(1);
        return flags;
}

static void oops_end(unsigned long flags, struct pt_regs *regs, int signr)
{
        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        die_owner = -1;
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        die_nest_count--;
        if (!die_nest_count)
                /* Nest count reaches zero, release the lock. */
                arch_spin_unlock(&die_lock);
        raw_local_irq_restore(flags);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");
        if (signr)
                do_exit(signr);
}

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        enum bug_trap_type bug_type = BUG_TRAP_TYPE_NONE;
        unsigned long flags = oops_begin();
        int sig = SIGSEGV;

        if (!user_mode(regs))
                bug_type = report_bug(regs->ARM_pc, regs);
        if (bug_type != BUG_TRAP_TYPE_NONE)
                str = "Oops - BUG";

        if (__die(str, err, regs))
                sig = 0;

        oops_end(flags, regs, sig);
}

void arm_notify_die(const char *str, struct pt_regs *regs,
                struct siginfo *info, unsigned long err, unsigned long trap)
{
        if (user_mode(regs)) {
                current->thread.error_code = err;
                current->thread.trap_no = trap;

                force_sig_info(info->si_signo, info, current);
        } else {
                die(str, regs, err);
        }
}

#ifdef CONFIG_GENERIC_BUG

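/*
 * BUG() on ARM is a trapping instruction encoding (BUG_INSTR_VALUE) rather
 * than a call; report_bug() uses this helper to confirm that the faulting
 * PC really does contain that encoding before an undefined-instruction
 * oops is treated as a BUG().
 */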
int is_valid_bugaddr(unsigned long pc)
{
#ifdef CONFIG_THUMB2_KERNEL
        u16 bkpt;
        u16 insn = __opcode_to_mem_thumb16(BUG_INSTR_VALUE);
#else
        u32 bkpt;
        u32 insn = __opcode_to_mem_arm(BUG_INSTR_VALUE);
#endif

        if (probe_kernel_address((unsigned *)pc, bkpt))
                return 0;

        return bkpt == insn;
}

#endif

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

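/*
 * Walk the registered hooks and remember the last one whose
 * instr_mask/instr_val and cpsr_mask/cpsr_val pairs both match.
 * Returns 0 when a matching hook handled the instruction, nonzero to
 * fall through to the SIGILL path in do_undefinstr().
 */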
static int call_undef_hook(struct pt_regs *regs, unsigned int instr)
{
        struct undef_hook *hook;
        unsigned long flags;
        int (*fn)(struct pt_regs *regs, unsigned int instr) = NULL;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                    (regs->ARM_cpsr & hook->cpsr_mask) == hook->cpsr_val)
                        fn = hook->fn;
        raw_spin_unlock_irqrestore(&undef_lock, flags);

        return fn ? fn(regs, instr) : 1;
}

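/*
 * Entry point for the undefined-instruction exception.  Fetch the faulting
 * instruction (composing 32-bit Thumb-2 encodings from two halfwords where
 * needed), offer it to the registered undef_hooks, and raise SIGILL for the
 * task if nothing claims it.
 */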
asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        unsigned int instr;
        siginfo_t info;
        void __user *pc;

        pc = (void __user *)instruction_pointer(regs);

        if (processor_mode(regs) == SVC_MODE) {
#ifdef CONFIG_THUMB2_KERNEL
                if (thumb_mode(regs)) {
                        instr = __mem_to_opcode_thumb16(((u16 *)pc)[0]);
                        if (is_wide_instruction(instr)) {
                                u16 inst2;
                                inst2 = __mem_to_opcode_thumb16(((u16 *)pc)[1]);
                                instr = __opcode_thumb32_compose(instr, inst2);
                        }
                } else
#endif
                        instr = __mem_to_opcode_arm(*(u32 *) pc);
        } else if (thumb_mode(regs)) {
                if (get_user(instr, (u16 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_thumb16(instr);
                if (is_wide_instruction(instr)) {
                        unsigned int instr2;
                        if (get_user(instr2, (u16 __user *)pc+1))
                                goto die_sig;
                        instr2 = __mem_to_opcode_thumb16(instr2);
                        instr = __opcode_thumb32_compose(instr, instr2);
                }
        } else {
                if (get_user(instr, (u32 __user *)pc))
                        goto die_sig;
                instr = __mem_to_opcode_arm(instr);
        }

        if (call_undef_hook(regs, instr) == 0)
                return;

die_sig:
#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_UNDEFINED) {
                pr_info("%s (%d): undefined instruction: pc=%p\n",
                        current->comm, task_pid_nr(current), pc);
                __show_regs(regs);
                dump_instr(KERN_INFO, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = pc;

        arm_notify_die("Oops - undefined instruction", regs, &info, 0, 6);
}

/*
 * Handle FIQ similarly to NMI on x86 systems.
 *
 * The runtime environment for NMIs is extremely restrictive
 * (NMIs can pre-empt critical sections, meaning almost all locking is
 * forbidden), so this default FIQ handling must only be used in
 * circumstances where non-maskability improves robustness, such as
 * watchdog or debug logic.
 *
 * This handler is not appropriate for general purpose use in drivers or
 * platform code and can be overridden using set_fiq_handler().
 */
asmlinkage void __exception_irq_entry handle_fiq_as_nmi(struct pt_regs *regs)
{
        struct pt_regs *old_regs = set_irq_regs(regs);

        nmi_enter();

        /* nop. FIQ handlers for special arch/arm features can be added here. */

        nmi_exit();

        set_irq_regs(old_regs);
}

/*
 * bad_mode handles the impossible case in the vectors.  If you see one of
 * these, then it's extremely serious, and could mean you have buggy hardware.
 * It never returns, and never tries to sync.  We hope that we can at least
 * dump out some state information...
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected\n", handler[reason]);

        die("Oops - bad mode", regs, 0);
        local_irq_disable();
        panic("bad mode");
}

static int bad_syscall(int n, struct pt_regs *regs)
{
        siginfo_t info;

        if ((current->personality & PER_MASK) != PER_LINUX) {
                send_sig(SIGSEGV, current, 1);
                return regs->ARM_r0;
        }

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: obsolete system call %08x.\n",
                        task_pid_nr(current), current->comm, n);
                dump_instr(KERN_ERR, regs);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall", regs, &info, n, 0);

        return regs->ARM_r0;
}

static inline int
__do_cache_op(unsigned long start, unsigned long end)
{
        int ret;

        do {
                unsigned long chunk = min(PAGE_SIZE, end - start);

                if (fatal_signal_pending(current))
                        return 0;

                ret = flush_cache_user_range(start, start + chunk);
                if (ret)
                        return ret;

                cond_resched();
                start += chunk;
        } while (start < end);

        return 0;
}

static inline int
do_cache_op(unsigned long start, unsigned long end, int flags)
{
        if (end < start || flags)
                return -EINVAL;

        if (!access_ok(VERIFY_READ, start, end - start))
                return -EFAULT;

        return __do_cache_op(start, end);
}
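
/*
 * do_cache_op() is reached from userspace via the ARM-private cacheflush
 * syscall handled by arm_syscall() below.  Illustrative sketch only
 * (sync_icache() is a made-up helper; portable code should normally use
 * the compiler builtin __builtin___clear_cache() instead):
 *
 *      #include <unistd.h>
 *      #include <asm/unistd.h>
 *
 *      static void sync_icache(void *start, void *end)
 *      {
 *              syscall(__ARM_NR_cacheflush, start, end, 0);
 *      }
 *
 * The third argument is the flags word (r2), which must be zero; see the
 * comment above the NR(cacheflush) case below.
 */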

/*
 * Handle all unrecognised system calls.
 *  0x9f0000 - 0x9fffff are some more esoteric system calls
 */
#define NR(x) ((__ARM_NR_##x) - __ARM_NR_BASE)
asmlinkage int arm_syscall(int no, struct pt_regs *regs)
{
        siginfo_t info;

        if ((no >> 16) != (__ARM_NR_BASE>> 16))
                return bad_syscall(no, regs);

        switch (no & 0xffff) {
        case 0: /* branch through 0 */
                info.si_signo = SIGSEGV;
                info.si_errno = 0;
                info.si_code  = SEGV_MAPERR;
                info.si_addr  = NULL;

                arm_notify_die("branch through zero", regs, &info, 0, 0);
                return 0;

        case NR(breakpoint): /* SWI BREAK_POINT */
                regs->ARM_pc -= thumb_mode(regs) ? 2 : 4;
                ptrace_break(current, regs);
                return regs->ARM_r0;

        /*
         * Flush a region from virtual address 'r0' to virtual address 'r1'
         * _exclusive_.  There is no alignment requirement on either address;
         * user space does not need to know the hardware cache layout.
         *
         * r2 contains flags.  It should ALWAYS be passed as ZERO until it
         * is defined to be something else.  For now we ignore it, but may
         * the fires of hell burn in your belly if you break this rule. ;)
         *
         * (at a later date, we may want to allow this call to not flush
         * various aspects of the cache.  Passing '0' will guarantee that
         * everything necessary gets flushed to maintain consistency in
         * the specified region).
         */
        case NR(cacheflush):
                return do_cache_op(regs->ARM_r0, regs->ARM_r1, regs->ARM_r2);

        case NR(usr26):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr &= ~MODE32_BIT;
                return regs->ARM_r0;

        case NR(usr32):
                if (!(elf_hwcap & HWCAP_26BIT))
                        break;
                regs->ARM_cpsr |= MODE32_BIT;
                return regs->ARM_r0;

        case NR(set_tls):
                set_tls(regs->ARM_r0);
                return 0;

        default:
                /* Calls 9f00xx..9f07ff are defined to return -ENOSYS
                   if not implemented, rather than raising SIGILL.  This
                   way the calling program can gracefully determine whether
                   a feature is supported.  */
                if ((no & 0xffff) <= 0x7ff)
                        return -ENOSYS;
                break;
        }
#ifdef CONFIG_DEBUG_USER
        /*
         * experience shows that these seem to indicate that
         * something catastrophic has happened
         */
        if (user_debug & UDBG_SYSCALL) {
                pr_err("[%d] %s: arm syscall %d\n",
                       task_pid_nr(current), current->comm, no);
                dump_instr("", regs);
                if (user_mode(regs)) {
                        __show_regs(regs);
                        c_backtrace(frame_pointer(regs), processor_mode(regs));
                }
        }
#endif
        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLTRP;
        info.si_addr  = (void __user *)instruction_pointer(regs) -
                         (thumb_mode(regs) ? 2 : 4);

        arm_notify_die("Oops - bad syscall(2)", regs, &info, no, 0);
        return 0;
}

#ifdef CONFIG_TLS_REG_EMUL

/*
 * We might be running on an ARMv6+ processor which should have the TLS
 * register but for some reason we can't use it, or maybe an SMP system
 * using a pre-ARMv6 processor (there are apparently a few prototypes like
 * that in existence) and therefore access to that register must be
 * emulated.
 */

static int get_tp_trap(struct pt_regs *regs, unsigned int instr)
{
        int reg = (instr >> 12) & 15;
        if (reg == 15)
                return 1;
        regs->uregs[reg] = current_thread_info()->tp_value[0];
        regs->ARM_pc += 4;
        return 0;
}

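/*
 * With cpsr_mask = PSR_T_BIT and cpsr_val = 0 the hook below only matches
 * in ARM (not Thumb) state, and the instr_mask/instr_val pair matches
 * "mrc p15, 0, <Rd>, c13, c0, 3" (a read of the user read-only TLS
 * register) with any condition code and any destination register.
 */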
static struct undef_hook arm_mrc_hook = {
        .instr_mask     = 0x0fff0fff,
        .instr_val      = 0x0e1d0f70,
        .cpsr_mask      = PSR_T_BIT,
        .cpsr_val       = 0,
        .fn             = get_tp_trap,
};

static int __init arm_mrc_hook_init(void)
{
        register_undef_hook(&arm_mrc_hook);
        return 0;
}

late_initcall(arm_mrc_hook_init);

#endif

/*
 * A data abort trap was taken, but we did not handle the instruction.
 * Try to abort the user program, or panic if it was the kernel.
 */
asmlinkage void
baddataabort(int code, unsigned long instr, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        siginfo_t info;

#ifdef CONFIG_DEBUG_USER
        if (user_debug & UDBG_BADABORT) {
                pr_err("[%d] %s: bad data abort: code %d instr 0x%08lx\n",
                       task_pid_nr(current), current->comm, code, instr);
                dump_instr(KERN_ERR, regs);
                show_pte(current->mm, addr);
        }
#endif

        info.si_signo = SIGILL;
        info.si_errno = 0;
        info.si_code  = ILL_ILLOPC;
        info.si_addr  = (void __user *)addr;

        arm_notify_die("unknown data abort code", regs, &info, instr, 0);
}

void __readwrite_bug(const char *fn)
{
        pr_err("%s called, but not implemented\n", fn);
        BUG();
}
EXPORT_SYMBOL(__readwrite_bug);

void __pte_error(const char *file, int line, pte_t pte)
{
        pr_err("%s:%d: bad pte %08llx.\n", file, line, (long long)pte_val(pte));
}

void __pmd_error(const char *file, int line, pmd_t pmd)
{
        pr_err("%s:%d: bad pmd %08llx.\n", file, line, (long long)pmd_val(pmd));
}

void __pgd_error(const char *file, int line, pgd_t pgd)
{
        pr_err("%s:%d: bad pgd %08llx.\n", file, line, (long long)pgd_val(pgd));
}

asmlinkage void __div0(void)
{
        pr_err("Division by zero in kernel.\n");
        dump_stack();
}
EXPORT_SYMBOL(__div0);

void abort(void)
{
        BUG();

        /* if that doesn't kill us, halt */
        panic("Oops failed to kill thread");
}
EXPORT_SYMBOL(abort);

void __init trap_init(void)
{
        return;
}

#ifdef CONFIG_KUSER_HELPERS
static void __init kuser_init(void *vectors)
{
        extern char __kuser_helper_start[], __kuser_helper_end[];
        int kuser_sz = __kuser_helper_end - __kuser_helper_start;

        memcpy(vectors + 0x1000 - kuser_sz, __kuser_helper_start, kuser_sz);

        /*
         * vectors + 0xfe0 = __kuser_get_tls
         * vectors + 0xfe8 = hardware TLS instruction at 0xffff0fe8
         */
        if (tls_emu || has_tls_reg)
                memcpy(vectors + 0xfe0, vectors + 0xfe8, 4);
}
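
/*
 * User space calls these helpers at fixed addresses in the vectors page
 * (mapped at 0xffff0000).  A rough sketch of reading the TLS pointer via
 * __kuser_get_tls on processors without a usable TLS register:
 *
 *      typedef void *(*kuser_get_tls_t)(void);
 *      void *tls = ((kuser_get_tls_t)0xffff0fe0)();
 *
 * kuser_get_tls_t is just an illustrative typedef; the helper ABI itself
 * is described in Documentation/arm/kernel_user_helpers.txt.
 */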
#else
static inline void __init kuser_init(void *vectors)
{
}
#endif

void __init early_trap_init(void *vectors_base)
{
#ifndef CONFIG_CPU_V7M
        unsigned long vectors = (unsigned long)vectors_base;
        extern char __stubs_start[], __stubs_end[];
        extern char __vectors_start[], __vectors_end[];
        unsigned i;

        vectors_page = vectors_base;

        /*
         * Poison the vectors page with an undefined instruction.  This
         * instruction is chosen to be undefined for both ARM and Thumb
         * ISAs.  The Thumb version is an undefined instruction with a
         * branch back to the undefined instruction.
         */
        for (i = 0; i < PAGE_SIZE / sizeof(u32); i++)
                ((u32 *)vectors_base)[i] = 0xe7fddef1;

        /*
         * Copy the vectors, stubs and kuser helpers (in entry-armv.S)
         * into the vector page, mapped at 0xffff0000, and ensure these
         * are visible to the instruction stream.
         */
        memcpy((void *)vectors, __vectors_start, __vectors_end - __vectors_start);
        memcpy((void *)vectors + 0x1000, __stubs_start, __stubs_end - __stubs_start);

        kuser_init(vectors_base);

        flush_icache_range(vectors, vectors + PAGE_SIZE * 2);
#else /* ifndef CONFIG_CPU_V7M */
        /*
         * on V7-M there is no need to copy the vector table to a dedicated
         * memory area. The address is configurable and so a table in the kernel
         * image can be used.
         */
#endif
}