linux/arch/arm64/kernel/traps.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * Based on arch/arm/kernel/traps.c
 *
 * Copyright (C) 1995-2009 Russell King
 * Copyright (C) 2012 ARM Ltd.
 */

#include <linux/bug.h>
#include <linux/signal.h>
#include <linux/personality.h>
#include <linux/kallsyms.h>
#include <linux/spinlock.h>
#include <linux/uaccess.h>
#include <linux/hardirq.h>
#include <linux/kdebug.h>
#include <linux/module.h>
#include <linux/kexec.h>
#include <linux/delay.h>
#include <linux/init.h>
#include <linux/sched/signal.h>
#include <linux/sched/debug.h>
#include <linux/sched/task_stack.h>
#include <linux/sizes.h>
#include <linux/syscalls.h>
#include <linux/mm_types.h>
#include <linux/kasan.h>

#include <asm/atomic.h>
#include <asm/bug.h>
#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/insn.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
#include <asm/stacktrace.h>
#include <asm/exception.h>
#include <asm/system_misc.h>
#include <asm/sysreg.h>

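/*
 * Names for the exception vector entry types, indexed by the 'reason'
 * value supplied by the entry code; printed by bad_mode() below.
 */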
static const char *handler[] = {
        "Synchronous Abort",
        "IRQ",
        "FIQ",
        "Error"
};

int show_unhandled_signals = 0;

static void dump_backtrace_entry(unsigned long where)
{
        printk(" %pS\n", (void *)where);
}

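/*
 * Dump the instruction stream around a kernel fault: the four instructions
 * preceding the PC plus the faulting one, which is printed in parentheses.
 * 'str' is sized for five "%08x " words, the bracketing "()" and a NUL.
 */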
static void dump_kernel_instr(const char *lvl, struct pt_regs *regs)
{
        unsigned long addr = instruction_pointer(regs);
        char str[sizeof("00000000 ") * 5 + 2 + 1], *p = str;
        int i;

        if (user_mode(regs))
                return;

        for (i = -4; i < 1; i++) {
                unsigned int val, bad;

                bad = aarch64_insn_read(&((u32 *)addr)[i], &val);

                if (!bad)
                        p += sprintf(p, i == 0 ? "(%08x) " : "%08x ", val);
                else {
                        p += sprintf(p, "bad PC value");
                        break;
                }
        }

        printk("%sCode: %s\n", lvl, str);
}

void dump_backtrace(struct pt_regs *regs, struct task_struct *tsk)
{
        struct stackframe frame;
        int skip = 0;

        pr_debug("%s(regs = %p tsk = %p)\n", __func__, regs, tsk);

        if (regs) {
                if (user_mode(regs))
                        return;
                skip = 1;
        }

        if (!tsk)
                tsk = current;

        if (!try_get_task_stack(tsk))
                return;

        if (tsk == current) {
                start_backtrace(&frame,
                                (unsigned long)__builtin_frame_address(0),
                                (unsigned long)dump_backtrace);
        } else {
                /*
                 * task blocked in __switch_to
                 */
                start_backtrace(&frame,
                                thread_saved_fp(tsk),
                                thread_saved_pc(tsk));
        }

        printk("Call trace:\n");
        do {
                /* skip until specified stack frame */
                if (!skip) {
                        dump_backtrace_entry(frame.pc);
                } else if (frame.fp == regs->regs[29]) {
                        skip = 0;
                        /*
                         * Typically this is the case when this function is
                         * called from die() or panic(). Since the exception
                         * handler's stack frame does not record the pc at
                         * which the exception was taken, use regs->pc
                         * instead.
                         */
                        dump_backtrace_entry(regs->pc);
                }
        } while (!unwind_frame(tsk, &frame));

        put_task_stack(tsk);
}

void show_stack(struct task_struct *tsk, unsigned long *sp)
{
        dump_backtrace(NULL, tsk);
        barrier();
}

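/* Decorations appended to the "Internal error" banner printed by __die(). */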
#ifdef CONFIG_PREEMPT
#define S_PREEMPT " PREEMPT"
#else
#define S_PREEMPT ""
#endif
#define S_SMP " SMP"

static int __die(const char *str, int err, struct pt_regs *regs)
{
        static int die_counter;
        int ret;

        pr_emerg("Internal error: %s: %x [#%d]" S_PREEMPT S_SMP "\n",
                 str, err, ++die_counter);

        /* trap and error numbers are mostly meaningless on ARM */
        ret = notify_die(DIE_OOPS, str, regs, err, 0, SIGSEGV);
        if (ret == NOTIFY_STOP)
                return ret;

        print_modules();
        show_regs(regs);

        dump_kernel_instr(KERN_EMERG, regs);

        return ret;
}

static DEFINE_RAW_SPINLOCK(die_lock);

/*
 * This function is protected against re-entrancy.
 */
void die(const char *str, struct pt_regs *regs, int err)
{
        int ret;
        unsigned long flags;

        raw_spin_lock_irqsave(&die_lock, flags);

        oops_enter();

        console_verbose();
        bust_spinlocks(1);
        ret = __die(str, err, regs);

        if (regs && kexec_should_crash(current))
                crash_kexec(regs);

        bust_spinlocks(0);
        add_taint(TAINT_DIE, LOCKDEP_NOW_UNRELIABLE);
        oops_exit();

        if (in_interrupt())
                panic("Fatal exception in interrupt");
        if (panic_on_oops)
                panic("Fatal exception");

        raw_spin_unlock_irqrestore(&die_lock, flags);

        if (ret != NOTIFY_STOP)
                do_exit(SIGSEGV);
}

static void arm64_show_signal(int signo, const char *str)
{
        static DEFINE_RATELIMIT_STATE(rs, DEFAULT_RATELIMIT_INTERVAL,
                                      DEFAULT_RATELIMIT_BURST);
        struct task_struct *tsk = current;
        unsigned int esr = tsk->thread.fault_code;
        struct pt_regs *regs = task_pt_regs(tsk);

        /* Leave if the signal won't be shown */
        if (!show_unhandled_signals ||
            !unhandled_signal(tsk, signo) ||
            !__ratelimit(&rs))
                return;

        pr_info("%s[%d]: unhandled exception: ", tsk->comm, task_pid_nr(tsk));
        if (esr)
                pr_cont("%s, ESR 0x%08x, ", esr_get_class_string(esr), esr);

        pr_cont("%s", str);
        print_vma_addr(KERN_CONT " in ", regs->pc);
        pr_cont("\n");
        __show_regs(regs);
}

void arm64_force_sig_fault(int signo, int code, void __user *addr,
                           const char *str)
{
        arm64_show_signal(signo, str);
        if (signo == SIGKILL)
                force_sig(SIGKILL);
        else
                force_sig_fault(signo, code, addr);
}

void arm64_force_sig_mceerr(int code, void __user *addr, short lsb,
                            const char *str)
{
        arm64_show_signal(SIGBUS, str);
        force_sig_mceerr(code, addr, lsb);
}

void arm64_force_sig_ptrace_errno_trap(int errno, void __user *addr,
                                       const char *str)
{
        arm64_show_signal(SIGTRAP, str);
        force_sig_ptrace_errno_trap(errno, addr);
}

void arm64_notify_die(const char *str, struct pt_regs *regs,
                      int signo, int sicode, void __user *addr,
                      int err)
{
        if (user_mode(regs)) {
                WARN_ON(regs != current_pt_regs());
                current->thread.fault_address = 0;
                current->thread.fault_code = err;

                arm64_force_sig_fault(signo, sicode, addr, str);
        } else {
                die(str, regs, err);
        }
}

void arm64_skip_faulting_instruction(struct pt_regs *regs, unsigned long size)
{
        regs->pc += size;

        /*
         * If we were single stepping, we want to get the step exception after
         * we return from the trap.
         */
        if (user_mode(regs))
                user_fastforward_single_step(current);
}

static LIST_HEAD(undef_hook);
static DEFINE_RAW_SPINLOCK(undef_lock);

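/*
 * Undef hooks let other subsystems emulate or fix up specific instruction
 * encodings before the SIGILL path runs. A sketch of a registration, with
 * hypothetical mask/value/handler names:
 *
 *      static int my_emulate(struct pt_regs *regs, u32 instr)
 *      {
 *              ...
 *              return 0;       // 0: handled; non-zero: fall through
 *      }
 *
 *      static struct undef_hook my_hook = {
 *              .instr_mask     = 0xffffffff,
 *              .instr_val      = 0xdeadc0de,   // hypothetical encoding
 *              .pstate_mask    = PSR_AA32_MODE_MASK,
 *              .pstate_val     = PSR_AA32_MODE_USR,
 *              .fn             = my_emulate,
 *      };
 *
 *      register_undef_hook(&my_hook);
 *
 * A non-zero return from fn falls through to the SIGILL path in
 * do_undefinstr().
 */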
void register_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_add(&hook->node, &undef_hook);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

void unregister_undef_hook(struct undef_hook *hook)
{
        unsigned long flags;

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_del(&hook->node);
        raw_spin_unlock_irqrestore(&undef_lock, flags);
}

static int call_undef_hook(struct pt_regs *regs)
{
        struct undef_hook *hook;
        unsigned long flags;
        u32 instr;
        int (*fn)(struct pt_regs *regs, u32 instr) = NULL;
        void __user *pc = (void __user *)instruction_pointer(regs);

        if (!user_mode(regs)) {
                __le32 instr_le;
                if (probe_kernel_address((__force __le32 *)pc, instr_le))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        } else if (compat_thumb_mode(regs)) {
                /* 16-bit Thumb instruction */
                __le16 instr_le;
                if (get_user(instr_le, (__le16 __user *)pc))
                        goto exit;
                instr = le16_to_cpu(instr_le);
                if (aarch32_insn_is_wide(instr)) {
                        u32 instr2;

                        if (get_user(instr_le, (__le16 __user *)(pc + 2)))
                                goto exit;
                        instr2 = le16_to_cpu(instr_le);
                        instr = (instr << 16) | instr2;
                }
        } else {
                /* 32-bit ARM instruction */
                __le32 instr_le;
                if (get_user(instr_le, (__le32 __user *)pc))
                        goto exit;
                instr = le32_to_cpu(instr_le);
        }

        raw_spin_lock_irqsave(&undef_lock, flags);
        list_for_each_entry(hook, &undef_hook, node)
                if ((instr & hook->instr_mask) == hook->instr_val &&
                        (regs->pstate & hook->pstate_mask) == hook->pstate_val)
                        fn = hook->fn;

        raw_spin_unlock_irqrestore(&undef_lock, flags);
exit:
        return fn ? fn(regs, instr) : 1;
}

void force_signal_inject(int signal, int code, unsigned long address)
{
        const char *desc;
        struct pt_regs *regs = current_pt_regs();

        if (WARN_ON(!user_mode(regs)))
                return;

        switch (signal) {
        case SIGILL:
                desc = "undefined instruction";
                break;
        case SIGSEGV:
                desc = "illegal memory access";
                break;
        default:
                desc = "unknown or unrecoverable error";
                break;
        }

        /* Force signals we don't understand to SIGKILL */
        if (WARN_ON(signal != SIGKILL &&
                    siginfo_layout(signal, code) != SIL_FAULT)) {
                signal = SIGKILL;
        }

        arm64_notify_die(desc, regs, signal, code, (void __user *)address, 0);
}

/*
 * Set up process info to signal segmentation fault - called on access error.
 */
void arm64_notify_segfault(unsigned long addr)
{
        int code;

        down_read(&current->mm->mmap_sem);
        if (find_vma(current->mm, addr) == NULL)
                code = SEGV_MAPERR;
        else
                code = SEGV_ACCERR;
        up_read(&current->mm->mmap_sem);

        force_signal_inject(SIGSEGV, code, addr);
}

asmlinkage void __exception do_undefinstr(struct pt_regs *regs)
{
        /* check for AArch32 breakpoint instructions */
        if (!aarch32_break_handler(regs))
                return;

        if (call_undef_hook(regs) == 0)
                return;

        BUG_ON(!user_mode(regs));
        force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

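/*
 * Run one cache-maintenance instruction on a user address. The inline
 * fixup (via _ASM_EXTABLE) catches a faulting access at label 1 and
 * branches to label 3, which loads -EFAULT into 'res' instead of
 * letting the fault propagate.
 */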
#define __user_cache_maint(insn, address, res)                  \
        if (address >= user_addr_max()) {                       \
                res = -EFAULT;                                  \
        } else {                                                \
                uaccess_ttbr0_enable();                         \
                asm volatile (                                  \
                        "1:     " insn ", %1\n"                 \
                        "       mov     %w0, #0\n"              \
                        "2:\n"                                  \
                        "       .pushsection .fixup,\"ax\"\n"   \
                        "       .align  2\n"                    \
                        "3:     mov     %w0, %w2\n"             \
                        "       b       2b\n"                   \
                        "       .popsection\n"                  \
                        _ASM_EXTABLE(1b, 3b)                    \
                        : "=r" (res)                            \
                        : "r" (address), "i" (-EFAULT));        \
                uaccess_ttbr0_disable();                        \
        }

static void user_cache_maint_handler(unsigned int esr, struct pt_regs *regs)
{
        unsigned long address;
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        int crm = (esr & ESR_ELx_SYS64_ISS_CRM_MASK) >> ESR_ELx_SYS64_ISS_CRM_SHIFT;
        int ret = 0;

        address = untagged_addr(pt_regs_read_reg(regs, rt));

        switch (crm) {
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAU:     /* DC CVAU, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAC:     /* DC CVAC, gets promoted */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVADP:    /* DC CVADP */
                __user_cache_maint("sys 3, c7, c13, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CVAP:     /* DC CVAP */
                __user_cache_maint("sys 3, c7, c12, 1", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_DC_CIVAC:    /* DC CIVAC */
                __user_cache_maint("dc civac", address, ret);
                break;
        case ESR_ELx_SYS64_ISS_CRM_IC_IVAU:     /* IC IVAU */
                __user_cache_maint("ic ivau", address, ret);
                break;
        default:
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
                return;
        }

        if (ret)
                arm64_notify_segfault(address);
        else
                arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void ctr_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);
        unsigned long val = arm64_ftr_reg_user_value(&arm64_ftr_reg_ctrel0);

        pt_regs_write_reg(regs, rt, val);

        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_read_counter());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = ESR_ELx_SYS64_ISS_RT(esr);

        pt_regs_write_reg(regs, rt, arch_timer_get_rate());
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

static void mrs_handler(unsigned int esr, struct pt_regs *regs)
{
        u32 sysreg, rt;

        rt = ESR_ELx_SYS64_ISS_RT(esr);
        sysreg = esr_sys64_to_sysreg(esr);

        if (do_emulate_mrs(regs, sysreg, rt) != 0)
                force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc);
}

static void wfi_handler(unsigned int esr, struct pt_regs *regs)
{
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
}

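/*
 * ESR-keyed emulation hooks: do_sysinstr() and do_cp15instr() walk these
 * tables and dispatch to the first entry whose mask/value pair matches,
 * i.e. (esr & esr_mask) == esr_val.
 */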
struct sys64_hook {
        unsigned int esr_mask;
        unsigned int esr_val;
        void (*handler)(unsigned int esr, struct pt_regs *regs);
};

static struct sys64_hook sys64_hooks[] = {
        {
                .esr_mask = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_EL0_CACHE_OP_VAL,
                .handler = user_cache_maint_handler,
        },
        {
                /* Trap read access to CTR_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CTR_READ,
                .handler = ctr_read_handler,
        },
        {
                /* Trap read access to CNTVCT_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTVCT,
                .handler = cntvct_read_handler,
        },
        {
                /* Trap read access to CNTFRQ_EL0 */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_CNTFRQ,
                .handler = cntfrq_read_handler,
        },
        {
                /* Trap read access to CPUID registers */
                .esr_mask = ESR_ELx_SYS64_ISS_SYS_MRS_OP_MASK,
                .esr_val = ESR_ELx_SYS64_ISS_SYS_MRS_OP_VAL,
                .handler = mrs_handler,
        },
        {
                /* Trap WFI instructions executed in userspace */
                .esr_mask = ESR_ELx_WFx_MASK,
                .esr_val = ESR_ELx_WFx_WFI_VAL,
                .handler = wfi_handler,
        },
        {},
};

#ifdef CONFIG_COMPAT
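/*
 * The AArch32 IT state is split across PSTATE: IT[1:0] live in bits
 * [26:25] and IT[7:2] in bits [15:10]. These helpers reassemble the
 * eight-bit field and split it again.
 */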
#define PSTATE_IT_1_0_SHIFT     25
#define PSTATE_IT_1_0_MASK      (0x3 << PSTATE_IT_1_0_SHIFT)
#define PSTATE_IT_7_2_SHIFT     10
#define PSTATE_IT_7_2_MASK      (0x3f << PSTATE_IT_7_2_SHIFT)

static u32 compat_get_it_state(struct pt_regs *regs)
{
        u32 it, pstate = regs->pstate;

        it  = (pstate & PSTATE_IT_1_0_MASK) >> PSTATE_IT_1_0_SHIFT;
        it |= ((pstate & PSTATE_IT_7_2_MASK) >> PSTATE_IT_7_2_SHIFT) << 2;

        return it;
}

static void compat_set_it_state(struct pt_regs *regs, u32 it)
{
        u32 pstate_it;

        pstate_it  = (it << PSTATE_IT_1_0_SHIFT) & PSTATE_IT_1_0_MASK;
        pstate_it |= ((it >> 2) << PSTATE_IT_7_2_SHIFT) & PSTATE_IT_7_2_MASK;

        regs->pstate &= ~PSR_AA32_IT_MASK;
        regs->pstate |= pstate_it;
}

static bool cp15_cond_valid(unsigned int esr, struct pt_regs *regs)
{
        int cond;

        /* Only a T32 instruction can trap without CV being set */
        if (!(esr & ESR_ELx_CV)) {
                u32 it;

                it = compat_get_it_state(regs);
                if (!it)
                        return true;

                cond = it >> 4;
        } else {
                cond = (esr & ESR_ELx_COND_MASK) >> ESR_ELx_COND_SHIFT;
        }

        return aarch32_opcode_cond_checks[cond](regs->pstate);
}

static void advance_itstate(struct pt_regs *regs)
{
        u32 it;

        /* ARM mode */
        if (!(regs->pstate & PSR_AA32_T_BIT) ||
            !(regs->pstate & PSR_AA32_IT_MASK))
                return;

        it  = compat_get_it_state(regs);

        /*
         * If this is the last instruction of the block, wipe the IT
         * state. Otherwise advance it.
         */
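        /*
         * IT[7:5] hold the base condition and are preserved (0xe0);
         * IT[4:0] hold the per-instruction mask and shift left by one
         * each step. IT[2:0] == 0 means this was the last instruction.
         */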
        if (!(it & 7))
                it = 0;
        else
                it = (it & 0xe0) | ((it << 1) & 0x1f);

        compat_set_it_state(regs, it);
}

static void arm64_compat_skip_faulting_instruction(struct pt_regs *regs,
                                                   unsigned int sz)
{
        advance_itstate(regs);
        arm64_skip_faulting_instruction(regs, sz);
}

static void compat_cntfrq_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int reg = (esr & ESR_ELx_CP15_32_ISS_RT_MASK) >> ESR_ELx_CP15_32_ISS_RT_SHIFT;

        pt_regs_write_reg(regs, reg, arch_timer_get_rate());
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static struct sys64_hook cp15_32_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_32_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_32_ISS_SYS_CNTFRQ,
                .handler = compat_cntfrq_read_handler,
        },
        {},
};

static void compat_cntvct_read_handler(unsigned int esr, struct pt_regs *regs)
{
        int rt = (esr & ESR_ELx_CP15_64_ISS_RT_MASK) >> ESR_ELx_CP15_64_ISS_RT_SHIFT;
        int rt2 = (esr & ESR_ELx_CP15_64_ISS_RT2_MASK) >> ESR_ELx_CP15_64_ISS_RT2_SHIFT;
        u64 val = arch_timer_read_counter();

        pt_regs_write_reg(regs, rt, lower_32_bits(val));
        pt_regs_write_reg(regs, rt2, upper_32_bits(val));
        arm64_compat_skip_faulting_instruction(regs, 4);
}

static struct sys64_hook cp15_64_hooks[] = {
        {
                .esr_mask = ESR_ELx_CP15_64_ISS_SYS_MASK,
                .esr_val = ESR_ELx_CP15_64_ISS_SYS_CNTVCT,
                .handler = compat_cntvct_read_handler,
        },
        {},
};

asmlinkage void __exception do_cp15instr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook, *hook_base;

        if (!cp15_cond_valid(esr, regs)) {
                /*
                 * There is no T16 variant of a CP access, so we
                 * always advance PC by 4 bytes.
                 */
                arm64_compat_skip_faulting_instruction(regs, 4);
                return;
        }

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_CP15_32:
                hook_base = cp15_32_hooks;
                break;
        case ESR_ELx_EC_CP15_64:
                hook_base = cp15_64_hooks;
                break;
        default:
                do_undefinstr(regs);
                return;
        }

        for (hook = hook_base; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New cp15 instructions may previously have been undefined at
         * EL0. Fall back to our usual undefined instruction handler
         * so that we handle these consistently.
         */
        do_undefinstr(regs);
}
#endif

asmlinkage void __exception do_sysinstr(unsigned int esr, struct pt_regs *regs)
{
        struct sys64_hook *hook;

        for (hook = sys64_hooks; hook->handler; hook++)
                if ((hook->esr_mask & esr) == hook->esr_val) {
                        hook->handler(esr, regs);
                        return;
                }

        /*
         * New SYS instructions may previously have been undefined at EL0. Fall
         * back to our usual undefined instruction handler so that we handle
         * these consistently.
         */
        do_undefinstr(regs);
}

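/* Human-readable exception class names, indexed by ESR_ELx_EC(esr). */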
static const char *esr_class_str[] = {
        [0 ... ESR_ELx_EC_MAX]          = "UNRECOGNIZED EC",
        [ESR_ELx_EC_UNKNOWN]            = "Unknown/Uncategorized",
        [ESR_ELx_EC_WFx]                = "WFI/WFE",
        [ESR_ELx_EC_CP15_32]            = "CP15 MCR/MRC",
        [ESR_ELx_EC_CP15_64]            = "CP15 MCRR/MRRC",
        [ESR_ELx_EC_CP14_MR]            = "CP14 MCR/MRC",
        [ESR_ELx_EC_CP14_LS]            = "CP14 LDC/STC",
        [ESR_ELx_EC_FP_ASIMD]           = "ASIMD",
        [ESR_ELx_EC_CP10_ID]            = "CP10 MRC/VMRS",
        [ESR_ELx_EC_PAC]                = "PAC",
        [ESR_ELx_EC_CP14_64]            = "CP14 MCRR/MRRC",
        [ESR_ELx_EC_ILL]                = "PSTATE.IL",
        [ESR_ELx_EC_SVC32]              = "SVC (AArch32)",
        [ESR_ELx_EC_HVC32]              = "HVC (AArch32)",
        [ESR_ELx_EC_SMC32]              = "SMC (AArch32)",
        [ESR_ELx_EC_SVC64]              = "SVC (AArch64)",
        [ESR_ELx_EC_HVC64]              = "HVC (AArch64)",
        [ESR_ELx_EC_SMC64]              = "SMC (AArch64)",
        [ESR_ELx_EC_SYS64]              = "MSR/MRS (AArch64)",
        [ESR_ELx_EC_SVE]                = "SVE",
        [ESR_ELx_EC_IMP_DEF]            = "EL3 IMP DEF",
        [ESR_ELx_EC_IABT_LOW]           = "IABT (lower EL)",
        [ESR_ELx_EC_IABT_CUR]           = "IABT (current EL)",
        [ESR_ELx_EC_PC_ALIGN]           = "PC Alignment",
        [ESR_ELx_EC_DABT_LOW]           = "DABT (lower EL)",
        [ESR_ELx_EC_DABT_CUR]           = "DABT (current EL)",
        [ESR_ELx_EC_SP_ALIGN]           = "SP Alignment",
        [ESR_ELx_EC_FP_EXC32]           = "FP (AArch32)",
        [ESR_ELx_EC_FP_EXC64]           = "FP (AArch64)",
        [ESR_ELx_EC_SERROR]             = "SError",
        [ESR_ELx_EC_BREAKPT_LOW]        = "Breakpoint (lower EL)",
        [ESR_ELx_EC_BREAKPT_CUR]        = "Breakpoint (current EL)",
        [ESR_ELx_EC_SOFTSTP_LOW]        = "Software Step (lower EL)",
        [ESR_ELx_EC_SOFTSTP_CUR]        = "Software Step (current EL)",
        [ESR_ELx_EC_WATCHPT_LOW]        = "Watchpoint (lower EL)",
        [ESR_ELx_EC_WATCHPT_CUR]        = "Watchpoint (current EL)",
        [ESR_ELx_EC_BKPT32]             = "BKPT (AArch32)",
        [ESR_ELx_EC_VECTOR32]           = "Vector catch (AArch32)",
        [ESR_ELx_EC_BRK64]              = "BRK (AArch64)",
};

const char *esr_get_class_string(u32 esr)
{
        return esr_class_str[ESR_ELx_EC(esr)];
}

/*
 * bad_mode handles the impossible case in the exception vector. This is always
 * fatal.
 */
asmlinkage void bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
{
        console_verbose();

        pr_crit("Bad mode in %s handler detected on CPU%d, code 0x%08x -- %s\n",
                handler[reason], smp_processor_id(), esr,
                esr_get_class_string(esr));

        local_daif_mask();
        panic("bad mode");
}

/*
 * bad_el0_sync handles unexpected, but potentially recoverable synchronous
 * exceptions taken from EL0. Unlike bad_mode, this returns.
 */
asmlinkage void bad_el0_sync(struct pt_regs *regs, int reason, unsigned int esr)
{
        void __user *pc = (void __user *)instruction_pointer(regs);

        current->thread.fault_address = 0;
        current->thread.fault_code = esr;

        arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
                              "Bad EL0 synchronous exception");
}

#ifdef CONFIG_VMAP_STACK

DEFINE_PER_CPU(unsigned long [OVERFLOW_STACK_SIZE/sizeof(long)], overflow_stack)
        __aligned(16);

asmlinkage void handle_bad_stack(struct pt_regs *regs)
{
        unsigned long tsk_stk = (unsigned long)current->stack;
        unsigned long irq_stk = (unsigned long)this_cpu_read(irq_stack_ptr);
        unsigned long ovf_stk = (unsigned long)this_cpu_ptr(overflow_stack);
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        console_verbose();
        pr_emerg("Insufficient stack space to handle exception!");

        pr_emerg("ESR: 0x%08x -- %s\n", esr, esr_get_class_string(esr));
        pr_emerg("FAR: 0x%016lx\n", far);

        pr_emerg("Task stack:     [0x%016lx..0x%016lx]\n",
                 tsk_stk, tsk_stk + THREAD_SIZE);
        pr_emerg("IRQ stack:      [0x%016lx..0x%016lx]\n",
                 irq_stk, irq_stk + THREAD_SIZE);
        pr_emerg("Overflow stack: [0x%016lx..0x%016lx]\n",
                 ovf_stk, ovf_stk + OVERFLOW_STACK_SIZE);

        __show_regs(regs);

        /*
         * We use nmi_panic to limit the potential for recursive overflows,
         * and to get a better stack trace.
         */
        nmi_panic(NULL, "kernel stack overflow");
        cpu_park_loop();
}
#endif

void __noreturn arm64_serror_panic(struct pt_regs *regs, u32 esr)
{
        console_verbose();

        pr_crit("SError Interrupt on CPU%d, code 0x%08x -- %s\n",
                smp_processor_id(), esr, esr_get_class_string(esr));
        if (regs)
                __show_regs(regs);

        nmi_panic(regs, "Asynchronous SError Interrupt");

        cpu_park_loop();
        unreachable();
}

bool arm64_is_fatal_ras_serror(struct pt_regs *regs, unsigned int esr)
{
        u32 aet = arm64_ras_serror_get_severity(esr);

        switch (aet) {
        case ESR_ELx_AET_CE:    /* corrected error */
        case ESR_ELx_AET_UEO:   /* restartable, not yet consumed */
                /*
                 * The CPU can make progress. We may take UEO again as
                 * a more severe error.
                 */
                return false;

        case ESR_ELx_AET_UEU:   /* Uncorrected Unrecoverable */
        case ESR_ELx_AET_UER:   /* Uncorrected Recoverable */
                /*
                 * The CPU can't make progress. The exception may have
                 * been imprecise.
                 *
                 * Neoverse-N1 #1349291 means a non-KVM SError reported as
                 * Unrecoverable should be treated as Uncontainable. We
                 * call arm64_serror_panic() in both cases.
                 */
                return true;

        case ESR_ELx_AET_UC:    /* Uncontainable or Uncategorized error */
        default:
                /* Error has been silently propagated */
                arm64_serror_panic(regs, esr);
        }
}

asmlinkage void do_serror(struct pt_regs *regs, unsigned int esr)
{
        const bool was_in_nmi = in_nmi();

        if (!was_in_nmi)
                nmi_enter();

        /* non-RAS errors are not containable */
        if (!arm64_is_ras_serror(esr) || arm64_is_fatal_ras_serror(regs, esr))
                arm64_serror_panic(regs, esr);

        if (!was_in_nmi)
                nmi_exit();
}

void __pte_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pte %016lx.\n", file, line, val);
}

void __pmd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pmd %016lx.\n", file, line, val);
}

void __pud_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pud %016lx.\n", file, line, val);
}

void __pgd_error(const char *file, int line, unsigned long val)
{
        pr_err("%s:%d: bad pgd %016lx.\n", file, line, val);
}

/* GENERIC_BUG traps */

int is_valid_bugaddr(unsigned long addr)
{
        /*
         * bug_handler() is only called for BRK #BUG_BRK_IMM, so the
         * answer is trivial -- any spurious instances with no bug table
         * entry will be rejected by report_bug() and passed back to the
         * debug-monitors code, which handles them as a fatal unexpected
         * debug exception.
         */
        return 1;
}

static int bug_handler(struct pt_regs *regs, unsigned int esr)
{
        switch (report_bug(regs->pc, regs)) {
        case BUG_TRAP_TYPE_BUG:
                die("Oops - BUG", regs, 0);
                break;

        case BUG_TRAP_TYPE_WARN:
                break;

        default:
                /* unknown/unrecognised bug trap type */
                return DBG_HOOK_ERROR;
        }

        /* If thread survives, skip over the BUG instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook bug_break_hook = {
        .fn = bug_handler,
        .imm = BUG_BRK_IMM,
};

#ifdef CONFIG_KASAN_SW_TAGS

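/*
 * KASAN encodes its report parameters in the BRK comment immediate:
 * bit 5 says the compiler expects to recover (continue) after the
 * report, bit 4 distinguishes writes from reads, and bits 3:0 hold
 * log2 of the access size.
 */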
#define KASAN_ESR_RECOVER       0x20
#define KASAN_ESR_WRITE         0x10
#define KASAN_ESR_SIZE_MASK     0x0f
#define KASAN_ESR_SIZE(esr)     (1 << ((esr) & KASAN_ESR_SIZE_MASK))

static int kasan_handler(struct pt_regs *regs, unsigned int esr)
{
        bool recover = esr & KASAN_ESR_RECOVER;
        bool write = esr & KASAN_ESR_WRITE;
        size_t size = KASAN_ESR_SIZE(esr);
        u64 addr = regs->regs[0];
        u64 pc = regs->pc;

        kasan_report(addr, size, write, pc);

        /*
         * The instrumentation allows us to control whether we can proceed
         * after a crash was detected. This is done by passing the -recover
         * flag to the compiler. Disabling recovery allows the compiler to
         * generate more compact code.
         *
         * Unfortunately disabling recovery doesn't work for the kernel
         * right now. KASAN reporting is disabled in some contexts (for
         * example when the allocator accesses slab object metadata; this
         * is controlled by current->kasan_depth). All these accesses are
         * detected by the tool, even though the reports for them are not
         * printed.
         *
         * This is something that might be fixed at some point in the future.
         */
        if (!recover)
                die("Oops - KASAN", regs, 0);

        /* If thread survives, skip over the brk instruction and continue: */
        arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
        return DBG_HOOK_HANDLED;
}

static struct break_hook kasan_break_hook = {
        .fn     = kasan_handler,
        .imm    = KASAN_BRK_IMM,
        .mask   = KASAN_BRK_MASK,
};
#endif

/*
 * Initial handler for AArch64 BRK exceptions.
 * This handler is only used until debug_traps_init().
 */
int __init early_brk64(unsigned long addr, unsigned int esr,
                struct pt_regs *regs)
{
#ifdef CONFIG_KASAN_SW_TAGS
        unsigned int comment = esr & ESR_ELx_BRK64_ISS_COMMENT_MASK;

        if ((comment & ~KASAN_BRK_MASK) == KASAN_BRK_IMM)
                return kasan_handler(regs, esr) != DBG_HOOK_HANDLED;
#endif
        return bug_handler(regs, esr) != DBG_HOOK_HANDLED;
}

/* This registration must happen early, before debug_traps_init(). */
void __init trap_init(void)
{
        register_kernel_break_hook(&bug_break_hook);
#ifdef CONFIG_KASAN_SW_TAGS
        register_kernel_break_hook(&kasan_break_hook);
#endif
}