linux/arch/arm64/kernel/entry-common.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Exception handling code
 *
 * Copyright (C) 2019 ARM Ltd.
 */

#include <linux/context_tracking.h>
#include <linux/linkage.h>
#include <linux/lockdep.h>
#include <linux/ptrace.h>
#include <linux/sched.h>
#include <linux/sched/debug.h>
#include <linux/thread_info.h>

#include <asm/cpufeature.h>
#include <asm/daifflags.h>
#include <asm/esr.h>
#include <asm/exception.h>
#include <asm/kprobes.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/sdei.h>
#include <asm/stacktrace.h>
#include <asm/sysreg.h>
#include <asm/system_misc.h>

/*
 * Handle IRQ/context state management when entering from kernel mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_enter(), handling the kernel
 * mode transitions only.
 */
static __always_inline void __enter_from_kernel_mode(struct pt_regs *regs)
{
        regs->exit_rcu = false;

        if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
                lockdep_hardirqs_off(CALLER_ADDR0);
                rcu_irq_enter();
                trace_hardirqs_off_finish();

                regs->exit_rcu = true;
                return;
        }

        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_irq_enter_check_tick();
        trace_hardirqs_off_finish();
}

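/*
 * Non-instrumentable wrapper around the state management above; it also
 * checks the MTE tag fault status on entry (mte_check_tfsr_entry()) before
 * any instrumentable code can run.
 */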
static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
{
        __enter_from_kernel_mode(regs);
        mte_check_tfsr_entry();
}

/*
 * Handle IRQ/context state management when exiting to kernel mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 *
 * This is intended to match the logic in irqentry_exit(), handling the kernel
 * mode transitions only, and with preemption handled elsewhere.
 */
static __always_inline void __exit_to_kernel_mode(struct pt_regs *regs)
{
        lockdep_assert_irqs_disabled();

        if (interrupts_enabled(regs)) {
                if (regs->exit_rcu) {
                        trace_hardirqs_on_prepare();
                        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
                        rcu_irq_exit();
                        lockdep_hardirqs_on(CALLER_ADDR0);
                        return;
                }

                trace_hardirqs_on();
        } else {
                if (regs->exit_rcu)
                        rcu_irq_exit();
        }
}

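/*
 * Non-instrumentable wrapper: check the MTE tag fault status on exit
 * (mte_check_tfsr_exit()) while it is still safe to do so, then perform the
 * lockdep/RCU exit handling above.
 */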
static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
{
        mte_check_tfsr_exit();
        __exit_to_kernel_mode(regs);
}

/*
 * Handle IRQ/context state management when entering from user mode.
 * Before this function is called it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __enter_from_user_mode(void)
{
        lockdep_hardirqs_off(CALLER_ADDR0);
        CT_WARN_ON(ct_state() != CONTEXT_USER);
        user_exit_irqoff();
        trace_hardirqs_off_finish();
}

static __always_inline void enter_from_user_mode(struct pt_regs *regs)
{
        __enter_from_user_mode();
}

/*
 * Handle IRQ/context state management when exiting to user mode.
 * After this function returns it is not safe to call regular kernel code,
 * instrumentable code, or any code which may trigger an exception.
 */
static __always_inline void __exit_to_user_mode(void)
{
        trace_hardirqs_on_prepare();
        lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        user_enter_irqoff();
        lockdep_hardirqs_on(CALLER_ADDR0);
}

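/*
 * Mask all DAIF exceptions, then handle any pending thread work flags
 * (_TIF_WORK_MASK) via do_notify_resume() before the final return to
 * userspace.
 */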
static __always_inline void prepare_exit_to_user_mode(struct pt_regs *regs)
{
        unsigned long flags;

        local_daif_mask();

        flags = READ_ONCE(current_thread_info()->flags);
        if (unlikely(flags & _TIF_WORK_MASK))
                do_notify_resume(regs, flags);
}

static __always_inline void exit_to_user_mode(struct pt_regs *regs)
{
        prepare_exit_to_user_mode(regs);
        mte_check_tfsr_exit();
        __exit_to_user_mode();
}

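/*
 * C entry point for the final return to userspace, called from the exception
 * return assembly (the kernel exit path in entry.S, as assumed here) once all
 * C-level exception handling is complete.
 */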
asmlinkage void noinstr asm_exit_to_user_mode(struct pt_regs *regs)
{
        exit_to_user_mode(regs);
}

/*
 * Handle IRQ/context state management when entering an NMI from user/kernel
 * mode. Before this function is called it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_nmi(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        __nmi_enter();
        lockdep_hardirqs_off(CALLER_ADDR0);
        lockdep_hardirq_enter();
        rcu_nmi_enter();

        trace_hardirqs_off_finish();
        ftrace_nmi_enter();
}

/*
 * Handle IRQ/context state management when exiting an NMI from user/kernel
 * mode. After this function returns it is not safe to call regular kernel
 * code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_nmi(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        ftrace_nmi_exit();
        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        }

        rcu_nmi_exit();
        lockdep_hardirq_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
        __nmi_exit();
}

/*
 * Handle IRQ/context state management when entering a debug exception from
 * kernel mode. Before this function is called it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_enter_el1_dbg(struct pt_regs *regs)
{
        regs->lockdep_hardirqs = lockdep_hardirqs_enabled();

        lockdep_hardirqs_off(CALLER_ADDR0);
        rcu_nmi_enter();

        trace_hardirqs_off_finish();
}

/*
 * Handle IRQ/context state management when exiting a debug exception from
 * kernel mode. After this function returns it is not safe to call regular
 * kernel code, instrumentable code, or any code which may trigger an exception.
 */
static void noinstr arm64_exit_el1_dbg(struct pt_regs *regs)
{
        bool restore = regs->lockdep_hardirqs;

        if (restore) {
                trace_hardirqs_on_prepare();
                lockdep_hardirqs_on_prepare(CALLER_ADDR0);
        }

        rcu_nmi_exit();
        if (restore)
                lockdep_hardirqs_on(CALLER_ADDR0);
}

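/*
 * With pseudo-NMI (IRQ priority masking), an interrupt taken while the
 * interrupted context had IRQs masked can only be an NMI, so route it through
 * the NMI entry/exit paths; otherwise treat it as a normal kernel-mode entry.
 */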
static void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                arm64_enter_nmi(regs);
        else
                enter_from_kernel_mode(regs);
}

static void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
{
        if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
                arm64_exit_nmi(regs);
        else
                exit_to_kernel_mode(regs);
}

static void __sched arm64_preempt_schedule_irq(void)
{
        lockdep_assert_irqs_disabled();

        /*
         * DAIF.DA are cleared at the start of IRQ/FIQ handling, and when GIC
         * priority masking is used the GIC irqchip driver will clear DAIF.IF
         * using gic_arch_enable_irqs() for normal IRQs. If anything is set in
         * DAIF we must have handled an NMI, so skip preemption.
         */
        if (system_uses_irq_prio_masking() && read_sysreg(daif))
                return;

        /*
         * Preempting a task from an IRQ means we leave copies of PSTATE
         * on the stack. cpufeature's enable calls may modify PSTATE, but
         * resuming one of these preempted tasks would undo those changes.
         *
         * Only allow a task to be preempted once cpufeatures have been
         * enabled.
         */
        if (system_capabilities_finalized())
                preempt_schedule_irq();
}

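/*
 * Run the root interrupt handler on the per-CPU IRQ stack when we are still
 * on a task's thread stack; if we are already on another stack (e.g. the IRQ
 * or overflow stack), call the handler directly.
 */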
static void do_interrupt_handler(struct pt_regs *regs,
                                 void (*handler)(struct pt_regs *))
{
        if (on_thread_stack())
                call_on_irq_stack(regs, handler);
        else
                handler(regs);
}

extern void (*handle_arch_irq)(struct pt_regs *);
extern void (*handle_arch_fiq)(struct pt_regs *);

static void noinstr __panic_unhandled(struct pt_regs *regs, const char *vector,
                                      unsigned int esr)
{
        arm64_enter_nmi(regs);

        console_verbose();

        pr_crit("Unhandled %s exception on CPU%d, ESR 0x%08x -- %s\n",
                vector, smp_processor_id(), esr,
                esr_get_class_string(esr));

        __show_regs(regs);
        panic("Unhandled exception");
}

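/*
 * Generate a vector-specific handler (e.g. el1t_64_sync_handler) whose only
 * job is to report the exception and panic via __panic_unhandled().
 */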
#define UNHANDLED(el, regsize, vector)                                                  \
asmlinkage void noinstr el##_##regsize##_##vector##_handler(struct pt_regs *regs)       \
{                                                                                       \
        const char *desc = #regsize "-bit " #el " " #vector;                            \
        __panic_unhandled(regs, desc, read_sysreg(esr_el1));                            \
}

#ifdef CONFIG_ARM64_ERRATUM_1463225
static DEFINE_PER_CPU(int, __in_cortex_a76_erratum_1463225_wa);

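/*
 * Workaround for Cortex-A76 erratum 1463225, which affects single-stepping of
 * SVC instructions (summary paraphrased here, not quoted from the errata
 * notice). On the SVC path, when the task is being single-stepped on an
 * affected CPU, deliberately take a single-step exception from the kernel:
 * set MDSCR_EL1.SS/KDE, unmask debug exceptions ("msr daifclr, #8"), let
 * cortex_a76_erratum_1463225_debug_handler() below absorb the resulting step
 * exception, then restore MDSCR_EL1.
 */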
static void cortex_a76_erratum_1463225_svc_handler(void)
{
        u32 reg, val;

        if (!unlikely(test_thread_flag(TIF_SINGLESTEP)))
                return;

        if (!unlikely(this_cpu_has_cap(ARM64_WORKAROUND_1463225)))
                return;

        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 1);
        reg = read_sysreg(mdscr_el1);
        val = reg | DBG_MDSCR_SS | DBG_MDSCR_KDE;
        write_sysreg(val, mdscr_el1);
        asm volatile("msr daifclr, #8");
        isb();

        /* We will have taken a single-step exception by this point */

        write_sysreg(reg, mdscr_el1);
        __this_cpu_write(__in_cortex_a76_erratum_1463225_wa, 0);
}

static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        if (!__this_cpu_read(__in_cortex_a76_erratum_1463225_wa))
                return false;

        /*
         * We've taken a dummy step exception from the kernel to ensure
         * that interrupts are re-enabled on the syscall path. Return back
         * to cortex_a76_erratum_1463225_svc_handler() with debug exceptions
         * masked so that we can safely restore the mdscr and get on with
         * handling the syscall.
         */
        regs->pstate |= PSR_D_BIT;
        return true;
}
#else /* CONFIG_ARM64_ERRATUM_1463225 */
static void cortex_a76_erratum_1463225_svc_handler(void) { }
static bool cortex_a76_erratum_1463225_debug_handler(struct pt_regs *regs)
{
        return false;
}
#endif /* CONFIG_ARM64_ERRATUM_1463225 */

UNHANDLED(el1t, 64, sync)
UNHANDLED(el1t, 64, irq)
UNHANDLED(el1t, 64, fiq)
UNHANDLED(el1t, 64, error)

static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_mem_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_sp_pc_abort(far, esr, regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_undef(struct pt_regs *regs)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_undefinstr(regs);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

static void noinstr el1_dbg(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        arm64_enter_el1_dbg(regs);
        if (!cortex_a76_erratum_1463225_debug_handler(regs))
                do_debug_exception(far, esr, regs);
        arm64_exit_el1_dbg(regs);
}

static void noinstr el1_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_kernel_mode(regs);
        local_daif_inherit(regs);
        do_ptrauth_fault(regs, esr);
        local_daif_mask();
        exit_to_kernel_mode(regs);
}

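/*
 * Top-level dispatcher for synchronous exceptions taken from EL1 with SP_EL1
 * (EL1h): decode the exception class from ESR_EL1 and hand off to the el1_*
 * helpers above.
 */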
asmlinkage void noinstr el1h_64_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_DABT_CUR:
        case ESR_ELx_EC_IABT_CUR:
                el1_abort(regs, esr);
                break;
        /*
         * We don't handle ESR_ELx_EC_SP_ALIGN, since we will have hit a
         * recursive exception when trying to push the initial pt_regs.
         */
        case ESR_ELx_EC_PC_ALIGN:
                el1_pc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_UNKNOWN:
                el1_undef(regs);
                break;
        case ESR_ELx_EC_BREAKPT_CUR:
        case ESR_ELx_EC_SOFTSTP_CUR:
        case ESR_ELx_EC_WATCHPT_CUR:
        case ESR_ELx_EC_BRK64:
                el1_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el1_fpac(regs, esr);
                break;
        default:
                __panic_unhandled(regs, "64-bit el1h sync", esr);
        }
}

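/*
 * Common EL1 IRQ/FIQ handling: write DAIF_PROCCTX_NOIRQ so interrupts remain
 * masked for the duration, run the root handler (on the IRQ stack if needed),
 * and give preemption a chance before returning to the interrupted context.
 */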
static void noinstr el1_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        enter_el1_irq_or_nmi(regs);
        do_interrupt_handler(regs, handler);

        /*
         * Note: thread_info::preempt_count includes both thread_info::count
         * and thread_info::need_resched, and is not equivalent to
         * preempt_count().
         */
        if (IS_ENABLED(CONFIG_PREEMPTION) &&
            READ_ONCE(current_thread_info()->preempt_count) == 0)
                arm64_preempt_schedule_irq();

        exit_el1_irq_or_nmi(regs);
}

asmlinkage void noinstr el1h_64_irq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el1h_64_fiq_handler(struct pt_regs *regs)
{
        el1_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el1h_64_error_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
}

static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_ia(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        /*
         * We've taken an instruction abort from userspace and not yet
         * re-enabled IRQs. If the address is a kernel address, apply
         * BP hardening prior to enabling IRQs and preemption.
         */
        if (!is_ttbr0_addr(far))
                arm64_apply_bp_hardening();

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_mem_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sve_acc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sve_acc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpsimd_exc(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_fpsimd_exc(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sys(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sysinstr(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_pc(struct pt_regs *regs, unsigned long esr)
{
        unsigned long far = read_sysreg(far_el1);

        if (!is_ttbr0_addr(instruction_pointer(regs)))
                arm64_apply_bp_hardening();

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(far, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_sp(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_sp_pc_abort(regs->sp, esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_undef(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_undefinstr(regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_bti(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_bti(regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_inv(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        bad_el0_sync(regs, 0, esr);
        exit_to_user_mode(regs);
}

static void noinstr el0_dbg(struct pt_regs *regs, unsigned long esr)
{
        /* Only watchpoints write FAR_EL1, otherwise it's UNKNOWN */
        unsigned long far = read_sysreg(far_el1);

        enter_from_user_mode(regs);
        do_debug_exception(far, esr, regs);
        local_daif_restore(DAIF_PROCCTX);
        exit_to_user_mode(regs);
}

static void noinstr el0_svc(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        cortex_a76_erratum_1463225_svc_handler();
        do_el0_svc(regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_fpac(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_ptrauth_fault(regs, esr);
        exit_to_user_mode(regs);
}

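/*
 * Top-level dispatcher for synchronous exceptions taken from 64-bit EL0:
 * decode the exception class from ESR_EL1 and hand off to the el0_* helpers
 * above.
 */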
asmlinkage void noinstr el0t_64_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC64:
                el0_svc(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_SVE:
                el0_sve_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC64:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_SYS64:
        case ESR_ELx_EC_WFx:
                el0_sys(regs, esr);
                break;
        case ESR_ELx_EC_SP_ALIGN:
                el0_sp(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
                el0_undef(regs);
                break;
        case ESR_ELx_EC_BTI:
                el0_bti(regs);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BRK64:
                el0_dbg(regs, esr);
                break;
        case ESR_ELx_EC_FPAC:
                el0_fpac(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}

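/*
 * Common EL0 IRQ/FIQ handling. If the interrupted PC has bit 55 set it is not
 * a TTBR0 (user) address, so branch predictor hardening is applied before the
 * handler runs, mirroring the kernel-address check in el0_ia().
 */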
static void noinstr el0_interrupt(struct pt_regs *regs,
                                  void (*handler)(struct pt_regs *))
{
        enter_from_user_mode(regs);

        write_sysreg(DAIF_PROCCTX_NOIRQ, daif);

        if (regs->pc & BIT(55))
                arm64_apply_bp_hardening();

        do_interrupt_handler(regs, handler);

        exit_to_user_mode(regs);
}

static void noinstr __el0_irq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_irq);
}

asmlinkage void noinstr el0t_64_irq_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

static void noinstr __el0_fiq_handler_common(struct pt_regs *regs)
{
        el0_interrupt(regs, handle_arch_fiq);
}

asmlinkage void noinstr el0t_64_fiq_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

static void noinstr __el0_error_handler_common(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        enter_from_user_mode(regs);
        local_daif_restore(DAIF_ERRCTX);
        arm64_enter_nmi(regs);
        do_serror(regs, esr);
        arm64_exit_nmi(regs);
        local_daif_restore(DAIF_PROCCTX);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_64_error_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}

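/*
 * AArch32 (compat) EL0 handlers. The synchronous dispatcher below mirrors
 * el0t_64_sync_handler() but decodes the AArch32-specific exception classes
 * (SVC32, CP14/CP15 accesses, BKPT32). Without CONFIG_COMPAT these vectors
 * are not expected, and any such exception is reported via UNHANDLED().
 */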
#ifdef CONFIG_COMPAT
static void noinstr el0_cp15(struct pt_regs *regs, unsigned long esr)
{
        enter_from_user_mode(regs);
        local_daif_restore(DAIF_PROCCTX);
        do_cp15instr(esr, regs);
        exit_to_user_mode(regs);
}

static void noinstr el0_svc_compat(struct pt_regs *regs)
{
        enter_from_user_mode(regs);
        cortex_a76_erratum_1463225_svc_handler();
        do_el0_svc_compat(regs);
        exit_to_user_mode(regs);
}

asmlinkage void noinstr el0t_32_sync_handler(struct pt_regs *regs)
{
        unsigned long esr = read_sysreg(esr_el1);

        switch (ESR_ELx_EC(esr)) {
        case ESR_ELx_EC_SVC32:
                el0_svc_compat(regs);
                break;
        case ESR_ELx_EC_DABT_LOW:
                el0_da(regs, esr);
                break;
        case ESR_ELx_EC_IABT_LOW:
                el0_ia(regs, esr);
                break;
        case ESR_ELx_EC_FP_ASIMD:
                el0_fpsimd_acc(regs, esr);
                break;
        case ESR_ELx_EC_FP_EXC32:
                el0_fpsimd_exc(regs, esr);
                break;
        case ESR_ELx_EC_PC_ALIGN:
                el0_pc(regs, esr);
                break;
        case ESR_ELx_EC_UNKNOWN:
        case ESR_ELx_EC_CP14_MR:
        case ESR_ELx_EC_CP14_LS:
        case ESR_ELx_EC_CP14_64:
                el0_undef(regs);
                break;
        case ESR_ELx_EC_CP15_32:
        case ESR_ELx_EC_CP15_64:
                el0_cp15(regs, esr);
                break;
        case ESR_ELx_EC_BREAKPT_LOW:
        case ESR_ELx_EC_SOFTSTP_LOW:
        case ESR_ELx_EC_WATCHPT_LOW:
        case ESR_ELx_EC_BKPT32:
                el0_dbg(regs, esr);
                break;
        default:
                el0_inv(regs, esr);
        }
}

asmlinkage void noinstr el0t_32_irq_handler(struct pt_regs *regs)
{
        __el0_irq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_fiq_handler(struct pt_regs *regs)
{
        __el0_fiq_handler_common(regs);
}

asmlinkage void noinstr el0t_32_error_handler(struct pt_regs *regs)
{
        __el0_error_handler_common(regs);
}
#else /* CONFIG_COMPAT */
UNHANDLED(el0t, 32, sync)
UNHANDLED(el0t, 32, irq)
UNHANDLED(el0t, 32, fiq)
UNHANDLED(el0t, 32, error)
#endif /* CONFIG_COMPAT */

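/*
 * Reached when the entry code determines that the kernel stack is unusable
 * (stack overflow detection, available with CONFIG_VMAP_STACK): treat it like
 * an NMI for state tracking and panic with the register and fault info.
 */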
#ifdef CONFIG_VMAP_STACK
asmlinkage void noinstr handle_bad_stack(struct pt_regs *regs)
{
        unsigned int esr = read_sysreg(esr_el1);
        unsigned long far = read_sysreg(far_el1);

        arm64_enter_nmi(regs);
        panic_bad_stack(regs, esr, far);
}
#endif /* CONFIG_VMAP_STACK */

#ifdef CONFIG_ARM_SDE_INTERFACE
asmlinkage noinstr unsigned long
__sdei_handler(struct pt_regs *regs, struct sdei_registered_event *arg)
{
        unsigned long ret;

        /*
         * We didn't take an exception to get here, so the HW hasn't
         * set/cleared bits in PSTATE that we may rely on.
         *
         * The original SDEI spec (ARM DEN 0054A) can be read ambiguously as to
         * whether PSTATE bits are inherited unchanged or generated from
         * scratch, and the TF-A implementation always clears PAN and always
         * clears UAO. There are no other known implementations.
         *
         * Subsequent revisions (ARM DEN 0054B) follow the usual rules for how
         * PSTATE is modified upon architectural exceptions, and so PAN is
         * either inherited or set per SCTLR_ELx.SPAN, and UAO is always
         * cleared.
         *
         * We must explicitly reset PAN to the expected state, including
         * clearing it when the host isn't using it, in case a VM had it set.
         */
        if (system_uses_hw_pan())
                set_pstate_pan(1);
        else if (cpu_has_pan())
                set_pstate_pan(0);

        arm64_enter_nmi(regs);
        ret = do_sdei_event(regs, arg);
        arm64_exit_nmi(regs);

        return ret;
}
#endif /* CONFIG_ARM_SDE_INTERFACE */