linux/arch/arm64/kernel/entry.S
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:     Catalin Marinas <catalin.marinas@arm.com>
 *              Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/arm-smccc.h>
#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/irq.h>
#include <asm/memory.h>
#include <asm/mmu.h>
#include <asm/processor.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-uaccess.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
        .macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_exit
        .if \syscall == 1
        /*
         * Save/restore needed during syscalls.  Restore syscall arguments from
         * the values already saved on stack during kernel_entry.
         */
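        /* The C call above may clobber x0-x7, which hold the syscall args. */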
        ldp     x0, x1, [sp]
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        .endif
#endif
        .endm

        .macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_enter
#endif
        .endm

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC        0
#define BAD_IRQ         1
#define BAD_FIQ         2
#define BAD_ERROR       3

        .macro kernel_ventry, el, label, regsize = 64
        .align 7
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
alternative_if ARM64_UNMAP_KERNEL_AT_EL0
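        /*
         * For 64-bit EL0 entries via the KPTI trampoline, tramp_ventry
         * stashed the original x30 in tpidrro_el0; recover it and scrub the
         * register. Compat tasks never use x30 here, so it is simply zeroed.
         */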
        .if     \el == 0
        .if     \regsize == 64
        mrs     x30, tpidrro_el0
        msr     tpidrro_el0, xzr
        .else
        mov     x30, xzr
        .endif
        .endif
alternative_else_nop_endif
#endif

        sub     sp, sp, #S_FRAME_SIZE
#ifdef CONFIG_VMAP_STACK
        /*
         * Test whether the SP has overflowed, without corrupting a GPR.
         * Task and IRQ stacks are aligned to (1 << THREAD_SHIFT).
         */
        add     sp, sp, x0                      // sp' = sp + x0
        sub     x0, sp, x0                      // x0' = sp' - x0 = (sp + x0) - x0 = sp
        tbnz    x0, #THREAD_SHIFT, 0f
        sub     x0, sp, x0                      // x0'' = sp' - x0' = (sp + x0) - sp = x0
        sub     sp, sp, x0                      // sp'' = sp' - x0 = (sp + x0) - x0 = sp
        b       el\()\el\()_\label

0:
        /*
         * Either we've just detected an overflow, or we've taken an exception
         * while on the overflow stack. Either way, we won't return to
         * userspace, and can clobber EL0 registers to free up GPRs.
         */

        /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
        msr     tpidr_el0, x0

        /* Recover the original x0 value and stash it in tpidrro_el0 */
        sub     x0, sp, x0
        msr     tpidrro_el0, x0

        /* Switch to the overflow stack */
        adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0

        /*
         * Check whether we were already on the overflow stack. This may happen
         * after panic() re-enables interrupts.
         */
        mrs     x0, tpidr_el0                   // sp of interrupted context
        sub     x0, sp, x0                      // delta with top of overflow stack
        tst     x0, #~(OVERFLOW_STACK_SIZE - 1) // within range?
        b.ne    __bad_stack                     // no? -> bad stack pointer

        /* We were already on the overflow stack. Restore sp/x0 and carry on. */
        sub     sp, sp, x0
        mrs     x0, tpidrro_el0
#endif
        b       el\()\el\()_\label
        .endm

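        /*
         * Compute the address of \sym inside the trampoline alias mapping
         * (TRAMP_VALIAS), i.e. the address at which .entry.tramp.text is also
         * mapped while the kernel proper is unmapped from the user page
         * tables.
         */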
        .macro tramp_alias, dst, sym
        mov_q   \dst, TRAMP_VALIAS
        add     \dst, \dst, #(\sym - .entry.tramp.text)
        .endm

        // This macro corrupts x0-x3. It is the caller's duty
        // to save/restore them if required.
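        // \state is the ARM_SMCCC_ARCH_WORKAROUND_2 argument: 1 enables the
        // SSB mitigation on kernel entry, 0 disables it again on exit.
        // \targ is branched to whenever no firmware call is needed.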
        .macro  apply_ssbd, state, targ, tmp1, tmp2
#ifdef CONFIG_ARM64_SSBD
alternative_cb  arm64_enable_wa2_handling
        b       \targ
alternative_cb_end
        ldr_this_cpu    \tmp2, arm64_ssbd_callback_required, \tmp1
        cbz     \tmp2, \targ
        ldr     \tmp2, [tsk, #TSK_TI_FLAGS]
        tbnz    \tmp2, #TIF_SSBD, \targ
        mov     w0, #ARM_SMCCC_ARCH_WORKAROUND_2
        mov     w1, #\state
alternative_cb  arm64_update_smccc_conduit
        nop                                     // Patched to SMC/HVC #0
alternative_cb_end
#endif
        .endm

        .macro  kernel_entry, el, regsize = 64
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
        .endif
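        /* Save x0-x29 into the pt_regs frame the caller carved off the stack. */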
        stp     x0, x1, [sp, #16 * 0]
        stp     x2, x3, [sp, #16 * 1]
        stp     x4, x5, [sp, #16 * 2]
        stp     x6, x7, [sp, #16 * 3]
        stp     x8, x9, [sp, #16 * 4]
        stp     x10, x11, [sp, #16 * 5]
        stp     x12, x13, [sp, #16 * 6]
        stp     x14, x15, [sp, #16 * 7]
        stp     x16, x17, [sp, #16 * 8]
        stp     x18, x19, [sp, #16 * 9]
        stp     x20, x21, [sp, #16 * 10]
        stp     x22, x23, [sp, #16 * 11]
        stp     x24, x25, [sp, #16 * 12]
        stp     x26, x27, [sp, #16 * 13]
        stp     x28, x29, [sp, #16 * 14]

        .if     \el == 0
        mrs     x21, sp_el0
        ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
        ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
        disable_step_tsk x19, x20               // exceptions when scheduling.

        apply_ssbd 1, 1f, x22, x23

#ifdef CONFIG_ARM64_SSBD
        ldp     x0, x1, [sp, #16 * 0]
        ldp     x2, x3, [sp, #16 * 1]
#endif
1:

        mov     x29, xzr                        // fp pointed to user-space
        .else
        add     x21, sp, #S_FRAME_SIZE
        get_thread_info tsk
        /* Save the task's original addr_limit and set USER_DS */
        ldr     x20, [tsk, #TSK_TI_ADDR_LIMIT]
        str     x20, [sp, #S_ORIG_ADDR_LIMIT]
        mov     x20, #USER_DS
        str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
        /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
        .endif /* \el == 0 */
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]

        /*
         * In order to be able to dump the contents of struct pt_regs at the
         * time the exception was taken (in case we attempt to walk the call
         * stack later), chain it together with the stack frames.
         */
        .if \el == 0
        stp     xzr, xzr, [sp, #S_STACKFRAME]
        .else
        stp     x29, x22, [sp, #S_STACKFRAME]
        .endif
        add     x29, sp, #S_STACKFRAME

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        /*
         * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
         * EL0, there is no need to check the state of TTBR0_EL1 since
         * accesses are always enabled.
         * Note that the meaning of this bit differs from the ARMv8.1 PAN
         * feature as all TTBR0_EL1 accesses are disabled, not just those to
         * user mappings.
         */
alternative_if ARM64_HAS_PAN
        b       1f                              // skip TTBR0 PAN
alternative_else_nop_endif

        .if     \el != 0
        mrs     x21, ttbr0_el1
        tst     x21, #TTBR_ASID_MASK            // Check for the reserved ASID
        orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
        b.eq    1f                              // TTBR0 access already disabled
        and     x23, x23, #~PSR_PAN_BIT         // Clear the emulated PAN in the saved SPSR
        .endif

        __uaccess_ttbr0_disable x21
1:
#endif

        stp     x22, x23, [sp, #S_PC]

        /* Not in a syscall by default (el0_svc overwrites for real syscall) */
        .if     \el == 0
        mov     w21, #NO_SYSCALL
        str     w21, [sp, #S_SYSCALLNO]
        .endif

        /*
         * Set sp_el0 to current thread_info.
         */
        .if     \el == 0
        msr     sp_el0, tsk
        .endif

        /* Save pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        mrs_s   x20, SYS_ICC_PMR_EL1
        str     x20, [sp, #S_PMR_SAVE]
alternative_else_nop_endif

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x20 - ICC_PMR_EL1
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm

        .macro  kernel_exit, el
        .if     \el != 0
        disable_daif

        /* Restore the task's original addr_limit. */
        ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
        str     x20, [tsk, #TSK_TI_ADDR_LIMIT]

        /* No need to restore UAO, it will be restored from SPSR_EL1 */
        .endif

        /* Restore pmr */
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        ldr     x20, [sp, #S_PMR_SAVE]
        msr_s   SYS_ICC_PMR_EL1, x20
        mrs_s   x21, SYS_ICC_CTLR_EL1
        tbz     x21, #6, .L__skip_pmr_sync\@    // Check for ICC_CTLR_EL1.PMHE
        dsb     sy                              // Ensure priority change is seen by redistributor
.L__skip_pmr_sync\@:
alternative_else_nop_endif

        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
        .endif

#ifdef CONFIG_ARM64_SW_TTBR0_PAN
        /*
         * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
         * PAN bit checking.
         */
alternative_if ARM64_HAS_PAN
        b       2f                              // skip TTBR0 PAN
alternative_else_nop_endif

        .if     \el != 0
        tbnz    x22, #22, 1f                    // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
        .endif

        __uaccess_ttbr0_enable x0, x1

        .if     \el == 0
        /*
         * Enable errata workarounds only if returning to user. The only
         * workaround currently required for TTBR0_EL1 changes is for the
         * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
         * corruption).
         */
        bl      post_ttbr_update_workaround
        .endif
1:
        .if     \el != 0
        and     x22, x22, #~PSR_PAN_BIT         // ARMv8.0 CPUs do not understand this bit
        .endif
2:
#endif

        .if     \el == 0
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        msr     sp_el0, x23
        tst     x22, #PSR_MODE32_BIT            // native task?
        b.eq    3f

#ifdef CONFIG_ARM64_ERRATUM_845719
alternative_if ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
        mrs     x29, contextidr_el1
        msr     contextidr_el1, x29
#else
        msr     contextidr_el1, xzr
#endif
alternative_else_nop_endif
#endif
3:
#ifdef CONFIG_ARM64_ERRATUM_1418040
alternative_if_not ARM64_WORKAROUND_1418040
        b       4f
alternative_else_nop_endif
        /*
         * if (x22.mode32 == cntkctl_el1.el0vcten)
         *     cntkctl_el1.el0vcten = ~cntkctl_el1.el0vcten
         */
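        /*
         * i.e. trap EL0 virtual counter accesses while running a 32-bit task
         * (the 1418040 workaround) and re-enable them for 64-bit tasks.
         */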
        mrs     x1, cntkctl_el1
        eon     x0, x1, x22, lsr #3
        tbz     x0, #1, 4f
        eor     x1, x1, #2      // ARCH_TIMER_USR_VCT_ACCESS_EN
        msr     cntkctl_el1, x1
4:
#endif
        apply_ssbd 0, 5f, x0, x1
5:
        .endif

        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        ldp     x0, x1, [sp, #16 * 0]
        ldp     x2, x3, [sp, #16 * 1]
        ldp     x4, x5, [sp, #16 * 2]
        ldp     x6, x7, [sp, #16 * 3]
        ldp     x8, x9, [sp, #16 * 4]
        ldp     x10, x11, [sp, #16 * 5]
        ldp     x12, x13, [sp, #16 * 6]
        ldp     x14, x15, [sp, #16 * 7]
        ldp     x16, x17, [sp, #16 * 8]
        ldp     x18, x19, [sp, #16 * 9]
        ldp     x20, x21, [sp, #16 * 10]
        ldp     x22, x23, [sp, #16 * 11]
        ldp     x24, x25, [sp, #16 * 12]
        ldp     x26, x27, [sp, #16 * 13]
        ldp     x28, x29, [sp, #16 * 14]
        ldr     lr, [sp, #S_LR]
        add     sp, sp, #S_FRAME_SIZE           // restore sp

        .if     \el == 0
alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        bne     5f
        msr     far_el1, x30
        tramp_alias     x30, tramp_exit_native
        br      x30
5:
        tramp_alias     x30, tramp_exit_compat
        br      x30
#endif
        .else
        eret
        .endif
        sb
        .endm

        .macro  irq_stack_entry
        mov     x19, sp                 // preserve the original sp

        /*
         * Compare sp with the base of the task stack.
         * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
         * and should switch to the irq stack.
         */
        ldr     x25, [tsk, TSK_STACK]
        eor     x25, x25, x19
        and     x25, x25, #~(THREAD_SIZE - 1)
        cbnz    x25, 9998f

        ldr_this_cpu x25, irq_stack_ptr, x26
        mov     x26, #IRQ_STACK_SIZE
        add     x26, x25, x26

        /* switch to the irq stack */
        mov     sp, x26
9998:
        .endm

        /*
         * x19 should be preserved between irq_stack_entry and
         * irq_stack_exit.
         */
        .macro  irq_stack_exit
        mov     sp, x19
        .endm

/* GPRs used by entry code */
tsk     .req    x28             // current thread_info

/*
 * Interrupt handling.
 */
        .macro  irq_handler
        ldr_l   x1, handle_arch_irq
        mov     x0, sp
        irq_stack_entry
        blr     x1
        irq_stack_exit
        .endm

#ifdef CONFIG_ARM64_PSEUDO_NMI
        /*
         * Set res to 0 if irqs were unmasked in interrupted context.
         * Otherwise set res to non-0 value.
         */
        .macro  test_irqs_unmasked res:req, pmr:req
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        sub     \res, \pmr, #GIC_PRIO_IRQON
alternative_else
        mov     \res, xzr
alternative_endif
        .endm
#endif

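        /*
         * With pseudo-NMIs, ICC_PMR_EL1 takes over PSTATE.I's role of masking
         * regular interrupts; the helpers below set the PMR value expected on
         * the various kernel entry paths.
         */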
        .macro  gic_prio_kentry_setup, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
        alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        mov     \tmp, #(GIC_PRIO_PSR_I_SET | GIC_PRIO_IRQON)
        msr_s   SYS_ICC_PMR_EL1, \tmp
        alternative_else_nop_endif
#endif
        .endm

        .macro  gic_prio_irq_setup, pmr:req, tmp:req
#ifdef CONFIG_ARM64_PSEUDO_NMI
        alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        orr     \tmp, \pmr, #GIC_PRIO_PSR_I_SET
        msr_s   SYS_ICC_PMR_EL1, \tmp
        alternative_else_nop_endif
#endif
        .endm

        .text

/*
 * Exception vectors.
 */
        .pushsection ".entry.text", "ax"

        .align  11
ENTRY(vectors)
        kernel_ventry   1, sync_invalid                 // Synchronous EL1t
        kernel_ventry   1, irq_invalid                  // IRQ EL1t
        kernel_ventry   1, fiq_invalid                  // FIQ EL1t
        kernel_ventry   1, error_invalid                // Error EL1t

        kernel_ventry   1, sync                         // Synchronous EL1h
        kernel_ventry   1, irq                          // IRQ EL1h
        kernel_ventry   1, fiq_invalid                  // FIQ EL1h
        kernel_ventry   1, error                        // Error EL1h

        kernel_ventry   0, sync                         // Synchronous 64-bit EL0
        kernel_ventry   0, irq                          // IRQ 64-bit EL0
        kernel_ventry   0, fiq_invalid                  // FIQ 64-bit EL0
        kernel_ventry   0, error                        // Error 64-bit EL0

#ifdef CONFIG_COMPAT
        kernel_ventry   0, sync_compat, 32              // Synchronous 32-bit EL0
        kernel_ventry   0, irq_compat, 32               // IRQ 32-bit EL0
        kernel_ventry   0, fiq_invalid_compat, 32       // FIQ 32-bit EL0
        kernel_ventry   0, error_compat, 32             // Error 32-bit EL0
#else
        kernel_ventry   0, sync_invalid, 32             // Synchronous 32-bit EL0
        kernel_ventry   0, irq_invalid, 32              // IRQ 32-bit EL0
        kernel_ventry   0, fiq_invalid, 32              // FIQ 32-bit EL0
        kernel_ventry   0, error_invalid, 32            // Error 32-bit EL0
#endif
END(vectors)

#ifdef CONFIG_VMAP_STACK
        /*
         * We detected an overflow in kernel_ventry, which switched to the
         * overflow stack. Stash the exception regs, and head to our overflow
         * handler.
         */
__bad_stack:
        /* Restore the original x0 value */
        mrs     x0, tpidrro_el0

        /*
         * Store the original GPRs to the new stack. The original SP (minus
         * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
         */
        sub     sp, sp, #S_FRAME_SIZE
        kernel_entry 1
        mrs     x0, tpidr_el0
        add     x0, x0, #S_FRAME_SIZE
        str     x0, [sp, #S_SP]

        /* Stash the regs for handle_bad_stack */
        mov     x0, sp

        /* Time to die */
        bl      handle_bad_stack
        ASM_BUG()
#endif /* CONFIG_VMAP_STACK */

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, el, reason, regsize = 64
        kernel_entry \el, \regsize
        mov     x0, sp
        mov     x1, #\reason
        mrs     x2, esr_el1
        bl      bad_mode
        ASM_BUG()
        .endm

el0_sync_invalid:
        inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
        inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
        inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
        inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
        inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)
#endif

el1_sync_invalid:
        inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
        inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
        inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
        inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
        .align  6
el1_sync:
        kernel_entry 1
        mrs     x1, esr_el1                     // read the syndrome register
        lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
        cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
        b.eq    el1_da
        cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
        b.eq    el1_ia
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL1
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv

el1_ia:
        /*
         * Fall through to the Data abort case
         */
el1_da:
        /*
         * Data abort handling
         */
        mrs     x3, far_el1
        inherit_daif    pstate=x23, tmp=x2
        clear_address_tag x0, x3
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort

        kernel_exit 1
el1_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x0, far_el1
        inherit_daif    pstate=x23, tmp=x2
        mov     x2, sp
        bl      do_sp_pc_abort
        ASM_BUG()
el1_undef:
        /*
         * Undefined instruction
         */
        inherit_daif    pstate=x23, tmp=x2
        mov     x0, sp
        bl      do_undefinstr
        kernel_exit 1
el1_dbg:
        /*
         * Debug exception handling
         */
        cmp     x24, #ESR_ELx_EC_BRK64          // if BRK64
        cinc    x24, x24, eq                    // set bit '0'
        tbz     x24, #0, el1_inv                // EL1 only
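        /*
         * Debug ECs taken from the current EL are odd (bit 0 set); BRK64
         * (0x3c) is even but also valid here, so the cinc above folds it
         * into the odd case before the bit 0 test.
         */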
        gic_prio_kentry_setup tmp=x3
        mrs     x0, far_el1
        mov     x2, sp                          // struct pt_regs
        bl      do_debug_exception
        kernel_exit 1
el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        inherit_daif    pstate=x23, tmp=x2
        mov     x0, sp
        mov     x2, x1
        mov     x1, #BAD_SYNC
        bl      bad_mode
        ASM_BUG()
ENDPROC(el1_sync)

        .align  6
el1_irq:
        kernel_entry 1
        gic_prio_irq_setup pmr=x20, tmp=x1
        enable_da_f

#ifdef CONFIG_ARM64_PSEUDO_NMI
        test_irqs_unmasked      res=x0, pmr=x20
        cbz     x0, 1f
        bl      asm_nmi_enter
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif

        irq_handler

#ifdef CONFIG_PREEMPT
        ldr     x24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
alternative_if ARM64_HAS_IRQ_PRIO_MASKING
        /*
         * DA_F were cleared at start of handling. If anything is set in DAIF,
         * we come back from an NMI, so skip preemption
         */
        mrs     x0, daif
        orr     x24, x24, x0
alternative_else_nop_endif
        cbnz    x24, 1f                         // preempt count != 0 || NMI return path
        bl      preempt_schedule_irq            // irq en/disable is done inside
1:
#endif

#ifdef CONFIG_ARM64_PSEUDO_NMI
        /*
         * When using IRQ priority masking, we can get spurious interrupts while
         * PMR is set to GIC_PRIO_IRQOFF. An NMI might also have occurred in a
         * section with interrupts disabled. Skip tracing in those cases.
         */
        test_irqs_unmasked      res=x0, pmr=x20
        cbz     x0, 1f
        bl      asm_nmi_exit
1:
#endif

#ifdef CONFIG_TRACE_IRQFLAGS
#ifdef CONFIG_ARM64_PSEUDO_NMI
        test_irqs_unmasked      res=x0, pmr=x20
        cbnz    x0, 1f
#endif
        bl      trace_hardirqs_on
1:
#endif

        kernel_exit 1
ENDPROC(el1_irq)

/*
 * EL0 mode handlers.
 */
        .align  6
el0_sync:
        kernel_entry 0
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
        cmp     x24, #ESR_ELx_EC_SVC64          // SVC in 64-bit state
        b.eq    el0_svc
        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_SVE            // SVE access
        b.eq    el0_sve_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC64       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el0_sys
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv

#ifdef CONFIG_COMPAT
        .align  6
el0_sync_compat:
        kernel_entry 0, 32
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
        cmp     x24, #ESR_ELx_EC_SVC32          // SVC in 32-bit state
        b.eq    el0_svc_compat
        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC32       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_32        // CP15 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_64        // CP15 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_MR        // CP14 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_LS        // CP14 LDC/STC trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_64        // CP14 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv
el0_svc_compat:
        gic_prio_kentry_setup tmp=x1
        mov     x0, sp
        bl      el0_svc_compat_handler
        b       ret_to_user

        .align  6
el0_irq_compat:
        kernel_entry 0, 32
        b       el0_irq_naked

el0_error_compat:
        kernel_entry 0, 32
        b       el0_error_naked
#endif

el0_da:
        /*
         * Data abort handling
         */
        mrs     x26, far_el1
        enable_daif
        ct_user_exit
        clear_address_tag x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user
el0_ia:
        /*
         * Instruction abort handling
         */
        mrs     x26, far_el1
        gic_prio_kentry_setup tmp=x0
        enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_el0_ia_bp_hardening
        b       ret_to_user
el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_acc
        b       ret_to_user
el0_sve_acc:
        /*
         * Scalable Vector Extension access
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_sve_acc
        b       ret_to_user
el0_fpsimd_exc:
        /*
         * Floating Point, Advanced SIMD or SVE exception
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_exc
        b       ret_to_user
el0_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x26, far_el1
        gic_prio_kentry_setup tmp=x0
        enable_da_f
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        ct_user_exit
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_sp_pc_abort
        b       ret_to_user
el0_undef:
        /*
         * Undefined instruction
         */
        enable_daif
        ct_user_exit
        mov     x0, sp
        bl      do_undefinstr
        b       ret_to_user
el0_sys:
        /*
         * System instructions, for trapped cache maintenance instructions
         */
        enable_daif
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_sysinstr
        b       ret_to_user
el0_dbg:
        /*
         * Debug exception handling
         */
        tbnz    x24, #0, el0_inv                // EL0 only
        gic_prio_kentry_setup tmp=x3
        mrs     x0, far_el1
        mov     x1, x25
        mov     x2, sp
        bl      do_debug_exception
        enable_da_f
        ct_user_exit
        b       ret_to_user
el0_inv:
        enable_daif
        ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mov     x2, x25
        bl      bad_el0_sync
        b       ret_to_user
ENDPROC(el0_sync)

        .align  6
el0_irq:
        kernel_entry 0
el0_irq_naked:
        gic_prio_irq_setup pmr=x20, tmp=x0
        enable_da_f

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif

        ct_user_exit
#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
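        /*
         * An ELR in the TTBR1 (kernel) half of the address space while coming
         * from EL0 is suspicious; apply branch predictor hardening first.
         */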
        tbz     x22, #55, 1f
        bl      do_el0_irq_bp_hardening
1:
#endif
        irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        b       ret_to_user
ENDPROC(el0_irq)

el1_error:
        kernel_entry 1
        mrs     x1, esr_el1
        gic_prio_kentry_setup tmp=x2
        enable_dbg
        mov     x0, sp
        bl      do_serror
        kernel_exit 1
ENDPROC(el1_error)

el0_error:
        kernel_entry 0
el0_error_naked:
        mrs     x1, esr_el1
        gic_prio_kentry_setup tmp=x2
        enable_dbg
        mov     x0, sp
        bl      do_serror
        enable_da_f
        ct_user_exit
        b       ret_to_user
ENDPROC(el0_error)

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
work_pending:
        mov     x0, sp                          // 'regs'
        bl      do_notify_resume
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on               // enabled while in userspace
#endif
        ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for single-step
        b       finish_ret_to_user
/*
 * "slow" syscall return path.
 */
ret_to_user:
        disable_daif
        gic_prio_kentry_setup tmp=x3
        ldr     x1, [tsk, #TSK_TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
finish_ret_to_user:
        enable_step_tsk x1, x2
        kernel_exit 0
ENDPROC(ret_to_user)

/*
 * SVC handler.
 */
        .align  6
el0_svc:
        gic_prio_kentry_setup tmp=x1
        mov     x0, sp
        bl      el0_svc_handler
        b       ret_to_user
ENDPROC(el0_svc)

        .popsection                             // .entry.text

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * Exception vectors trampoline.
 */
        .pushsection ".entry.tramp.text", "ax"

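        /*
         * Point TTBR1 at swapper_pg_dir, relying on the linker placing it
         * PAGE_SIZE + RESERVED_TTBR0_SIZE above tramp_pg_dir, and switch from
         * the user ASID to the kernel ASID by clearing USER_ASID_FLAG.
         */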
        .macro tramp_map_kernel, tmp
        mrs     \tmp, ttbr1_el1
        add     \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
        bic     \tmp, \tmp, #USER_ASID_FLAG
        msr     ttbr1_el1, \tmp
#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
        /* ASID already in \tmp[63:48] */
        movk    \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
        movk    \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
        /* 2MB boundary containing the vectors, so we nobble the walk cache */
        movk    \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
        isb
        tlbi    vae1, \tmp
        dsb     nsh
alternative_else_nop_endif
#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
        .endm

        .macro tramp_unmap_kernel, tmp
        mrs     \tmp, ttbr1_el1
        sub     \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
        orr     \tmp, \tmp, #USER_ASID_FLAG
        msr     ttbr1_el1, \tmp
        /*
         * We avoid running the post_ttbr_update_workaround here because
         * it's only needed by Cavium ThunderX, which requires KPTI to be
         * disabled.
         */
        .endm

        .macro tramp_ventry, regsize = 64
        .align  7
1:
        .if     \regsize == 64
        msr     tpidrro_el0, x30        // Restored in kernel_ventry
        .endif
        /*
         * Defend against branch aliasing attacks by pushing a dummy
         * entry onto the return stack and using a RET instruction to
         * enter the full-fat kernel vectors.
         */
        bl      2f
        b       .
2:
        tramp_map_kernel        x30
#ifdef CONFIG_RANDOMIZE_BASE
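        /*
         * With KASLR, the real vectors address is loaded from the data page
         * mapped just after the trampoline text (__entry_tramp_data_start),
         * so the randomized address never appears in the trampoline image.
         */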
        adr     x30, tramp_vectors + PAGE_SIZE
alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
        ldr     x30, [x30]
#else
        ldr     x30, =vectors
#endif
alternative_if_not ARM64_WORKAROUND_CAVIUM_TX2_219_PRFM
        prfm    plil1strm, [x30, #(1b - tramp_vectors)]
alternative_else_nop_endif
        msr     vbar_el1, x30
        add     x30, x30, #(1b - tramp_vectors)
        isb
        ret
        .endm

        .macro tramp_exit, regsize = 64
        adr     x30, tramp_vectors
        msr     vbar_el1, x30
        tramp_unmap_kernel      x30
        .if     \regsize == 64
        mrs     x30, far_el1
        .endif
        eret
        sb
        .endm

        .align  11
ENTRY(tramp_vectors)
        .space  0x400

        tramp_ventry
        tramp_ventry
        tramp_ventry
        tramp_ventry

        tramp_ventry    32
        tramp_ventry    32
        tramp_ventry    32
        tramp_ventry    32
END(tramp_vectors)

ENTRY(tramp_exit_native)
        tramp_exit
END(tramp_exit_native)

ENTRY(tramp_exit_compat)
        tramp_exit      32
END(tramp_exit_compat)

        .ltorg
        .popsection                             // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
        .pushsection ".rodata", "a"
        .align PAGE_SHIFT
        .globl  __entry_tramp_data_start
__entry_tramp_data_start:
        .quad   vectors
        .popsection                             // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
        mov     x10, #THREAD_CPU_CONTEXT
        add     x8, x0, x10
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
        stp     x23, x24, [x8], #16
        stp     x25, x26, [x8], #16
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
        add     x8, x1, x10
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
        ldp     x25, x26, [x8], #16
        ldp     x27, x28, [x8], #16
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
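        /* sp_el0 holds the 'current' task/thread_info pointer in the kernel. */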
        msr     sp_el0, x1
        ret
ENDPROC(cpu_switch_to)
NOKPROBE(cpu_switch_to)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl      schedule_tail
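        /* For a kernel thread, copy_thread() left the fn in x19, its arg in x20. */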
        cbz     x19, 1f                         // not a kernel thread
        mov     x0, x20
        blr     x19
1:      get_thread_info tsk
        b       ret_to_user
ENDPROC(ret_from_fork)
NOKPROBE(ret_from_fork)

#ifdef CONFIG_ARM_SDE_INTERFACE

#include <asm/sdei.h>
#include <uapi/linux/arm_sdei.h>

.macro sdei_handler_exit exit_mode
        /* On success, this call never returns... */
        cmp     \exit_mode, #SDEI_EXIT_SMC
        b.ne    99f
        smc     #0
        b       .
99:     hvc     #0
        b       .
.endm

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
/*
 * The regular SDEI entry point may have been unmapped along with the rest of
 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
 * argument accessible.
 *
 * This clobbers x4; __sdei_handler() will restore it from firmware's
 * copy.
 */
.ltorg
.pushsection ".entry.tramp.text", "ax"
ENTRY(__sdei_asm_entry_trampoline)
        mrs     x4, ttbr1_el1
        tbz     x4, #USER_ASID_BIT, 1f

        tramp_map_kernel tmp=x4
        isb
        mov     x4, xzr

        /*
         * Use reg->interrupted_regs.addr_limit to remember whether to unmap
         * the kernel on exit.
         */
1:      str     x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]

#ifdef CONFIG_RANDOMIZE_BASE
        adr     x4, tramp_vectors + PAGE_SIZE
        add     x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
        ldr     x4, [x4]
#else
        ldr     x4, =__sdei_asm_handler
#endif
        br      x4
ENDPROC(__sdei_asm_entry_trampoline)
NOKPROBE(__sdei_asm_entry_trampoline)

/*
 * Make the exit call and restore the original ttbr1_el1
 *
 * x0 & x1: setup for the exit API call
 * x2: exit_mode
 * x4: struct sdei_registered_event argument from registration time.
 */
ENTRY(__sdei_asm_exit_trampoline)
        ldr     x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
        cbnz    x4, 1f

        tramp_unmap_kernel      tmp=x4

1:      sdei_handler_exit exit_mode=x2
ENDPROC(__sdei_asm_exit_trampoline)
NOKPROBE(__sdei_asm_exit_trampoline)
        .ltorg
.popsection             // .entry.tramp.text
#ifdef CONFIG_RANDOMIZE_BASE
.pushsection ".rodata", "a"
__sdei_asm_trampoline_next_handler:
        .quad   __sdei_asm_handler
.popsection             // .rodata
#endif /* CONFIG_RANDOMIZE_BASE */
#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */

/*
 * Software Delegated Exception entry point.
 *
 * x0: Event number
 * x1: struct sdei_registered_event argument from registration time.
 * x2: interrupted PC
 * x3: interrupted PSTATE
 * x4: maybe clobbered by the trampoline
 *
 * Firmware has preserved x0->x17 for us, we must save/restore the rest to
 * follow SMC-CC. We save (or retrieve) all the registers as the handler may
 * want them.
 */
ENTRY(__sdei_asm_handler)
        stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
        stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
        stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
        stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
        stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
        stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
        stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
        stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
        stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
        stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
        stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
        stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
        stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
        stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
        mov     x4, sp
        stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]

        mov     x19, x1

#ifdef CONFIG_VMAP_STACK
        /*
         * entry.S may have been using sp as a scratch register, find whether
         * this is a normal or critical event and switch to the appropriate
         * stack for this CPU.
         */
        ldrb    w4, [x19, #SDEI_EVENT_PRIORITY]
        cbnz    w4, 1f
        ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
        b       2f
1:      ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
2:      mov     x6, #SDEI_STACK_SIZE
        add     x5, x5, x6
        mov     sp, x5
#endif

        /*
         * We may have interrupted userspace, or a guest, or exit-from or
         * return-to either of these. We can't trust sp_el0, restore it.
         */
        mrs     x28, sp_el0
        ldr_this_cpu    dst=x0, sym=__entry_task, tmp=x1
        msr     sp_el0, x0

        /* If we interrupted the kernel point to the previous stack/frame. */
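        /* PSTATE.M[3:2] encodes the EL, in the same bit positions as CurrentEL. */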
        and     x0, x3, #0xc
        mrs     x1, CurrentEL
        cmp     x0, x1
        csel    x29, x29, xzr, eq       // fp, or zero
        csel    x4, x2, xzr, eq         // elr, or zero

        stp     x29, x4, [sp, #-16]!
        mov     x29, sp

        add     x0, x19, #SDEI_EVENT_INTREGS
        mov     x1, x19
        bl      __sdei_handler

        msr     sp_el0, x28
        /* restore regs >x17 that we clobbered */
        mov     x4, x19         // keep x4 for __sdei_asm_exit_trampoline
        ldp     x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
        ldp     x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
        ldp     lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
        mov     sp, x1

        mov     x1, x0                  // address to complete_and_resume
        /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
        cmp     x0, #1
        mov_q   x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
        mov_q   x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
        csel    x0, x2, x3, ls

        ldr_l   x2, sdei_exit_mode

alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
        sdei_handler_exit exit_mode=x2
alternative_else_nop_endif

#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
        tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline
        br      x5
#endif
ENDPROC(__sdei_asm_handler)
NOKPROBE(__sdei_asm_handler)
#endif /* CONFIG_ARM_SDE_INTERFACE */