linux/arch/arm64/kernel/entry.S
   1/*
   2 * Low-level exception handling code
   3 *
   4 * Copyright (C) 2012 ARM Ltd.
   5 * Authors:     Catalin Marinas <catalin.marinas@arm.com>
   6 *              Will Deacon <will.deacon@arm.com>
   7 *
   8 * This program is free software; you can redistribute it and/or modify
   9 * it under the terms of the GNU General Public License version 2 as
  10 * published by the Free Software Foundation.
  11 *
  12 * This program is distributed in the hope that it will be useful,
  13 * but WITHOUT ANY WARRANTY; without even the implied warranty of
  14 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
  15 * GNU General Public License for more details.
  16 *
  17 * You should have received a copy of the GNU General Public License
  18 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
  19 */
  20
  21#include <linux/init.h>
  22#include <linux/linkage.h>
  23
  24#include <asm/alternative.h>
  25#include <asm/assembler.h>
  26#include <asm/asm-offsets.h>
  27#include <asm/cpufeature.h>
  28#include <asm/errno.h>
  29#include <asm/esr.h>
  30#include <asm/irq.h>
  31#include <asm/memory.h>
  32#include <asm/mmu.h>
  33#include <asm/processor.h>
  34#include <asm/ptrace.h>
  35#include <asm/thread_info.h>
  36#include <asm/asm-uaccess.h>
  37#include <asm/unistd.h>
  38
  39/*
  40 * Context tracking subsystem.  Used to instrument transitions
  41 * between user and kernel mode.
  42 */
  43        .macro ct_user_exit, syscall = 0
  44#ifdef CONFIG_CONTEXT_TRACKING
  45        bl      context_tracking_user_exit
  46        .if \syscall == 1
  47        /*
  48         * Save/restore needed during syscalls.  Restore syscall arguments from
  49         * the values already saved on stack during kernel_entry.
  50         */
  51        ldp     x0, x1, [sp]
  52        ldp     x2, x3, [sp, #S_X2]
  53        ldp     x4, x5, [sp, #S_X4]
  54        ldp     x6, x7, [sp, #S_X6]
  55        .endif
  56#endif
  57        .endm
  58
  59        .macro ct_user_enter
  60#ifdef CONFIG_CONTEXT_TRACKING
  61        bl      context_tracking_user_enter
  62#endif
  63        .endm
  64
  65/*
  66 * Bad Abort numbers
  67 *-----------------
  68 */
  69#define BAD_SYNC        0
  70#define BAD_IRQ         1
  71#define BAD_FIQ         2
  72#define BAD_ERROR       3
  73
  74        .macro kernel_ventry, el, label, regsize = 64
  75        .align 7
  76#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
  77alternative_if ARM64_UNMAP_KERNEL_AT_EL0
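             // With KPTI enabled, tramp_ventry stashed the real x30 in tpidrro_el0
             // (64-bit EL0 only); recover it here and zero the sysreg again, since
             // native tasks expect tpidrro_el0 to read as zero. For 32-bit EL0
             // there is nothing to recover, so x30 is simply zeroed.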
  78        .if     \el == 0
  79        .if     \regsize == 64
  80        mrs     x30, tpidrro_el0
  81        msr     tpidrro_el0, xzr
  82        .else
  83        mov     x30, xzr
  84        .endif
  85        .endif
  86alternative_else_nop_endif
  87#endif
  88
  89        sub     sp, sp, #S_FRAME_SIZE
  90#ifdef CONFIG_VMAP_STACK
   91        /*
   92         * Test whether the SP has overflowed, without corrupting a GPR. Task
   93         * and IRQ stacks are aligned so that SP & (1 << THREAD_SHIFT) is zero.
   94         */
  95        add     sp, sp, x0                      // sp' = sp + x0
  96        sub     x0, sp, x0                      // x0' = sp' - x0 = (sp + x0) - x0 = sp
  97        tbnz    x0, #THREAD_SHIFT, 0f
  98        sub     x0, sp, x0                      // x0'' = sp' - x0' = (sp + x0) - sp = x0
  99        sub     sp, sp, x0                      // sp'' = sp' - x0 = (sp + x0) - x0 = sp
 100        b       el\()\el\()_\label
 101
 1020:
 103        /*
 104         * Either we've just detected an overflow, or we've taken an exception
 105         * while on the overflow stack. Either way, we won't return to
 106         * userspace, and can clobber EL0 registers to free up GPRs.
 107         */
 108
 109        /* Stash the original SP (minus S_FRAME_SIZE) in tpidr_el0. */
 110        msr     tpidr_el0, x0
 111
 112        /* Recover the original x0 value and stash it in tpidrro_el0 */
 113        sub     x0, sp, x0
 114        msr     tpidrro_el0, x0
 115
 116        /* Switch to the overflow stack */
 117        adr_this_cpu sp, overflow_stack + OVERFLOW_STACK_SIZE, x0
 118
 119        /*
 120         * Check whether we were already on the overflow stack. This may happen
 121         * after panic() re-enables interrupts.
 122         */
 123        mrs     x0, tpidr_el0                   // sp of interrupted context
 124        sub     x0, sp, x0                      // delta with top of overflow stack
 125        tst     x0, #~(OVERFLOW_STACK_SIZE - 1) // within range?
 126        b.ne    __bad_stack                     // no? -> bad stack pointer
 127
 128        /* We were already on the overflow stack. Restore sp/x0 and carry on. */
 129        sub     sp, sp, x0
 130        mrs     x0, tpidrro_el0
 131#endif
 132        b       el\()\el\()_\label
 133        .endm
 134
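     /*
      * tramp_alias computes the address of \sym inside the fixed trampoline
      * mapping (TRAMP_VALIAS plus the symbol's offset into .entry.tramp.text),
      * so it can be branched to even while the kernel proper is unmapped at EL0.
      */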
 135        .macro tramp_alias, dst, sym
 136        mov_q   \dst, TRAMP_VALIAS
 137        add     \dst, \dst, #(\sym - .entry.tramp.text)
 138        .endm
 139
 140        .macro  kernel_entry, el, regsize = 64
 141        .if     \regsize == 32
 142        mov     w0, w0                          // zero upper 32 bits of x0
 143        .endif
 144        stp     x0, x1, [sp, #16 * 0]
 145        stp     x2, x3, [sp, #16 * 1]
 146        stp     x4, x5, [sp, #16 * 2]
 147        stp     x6, x7, [sp, #16 * 3]
 148        stp     x8, x9, [sp, #16 * 4]
 149        stp     x10, x11, [sp, #16 * 5]
 150        stp     x12, x13, [sp, #16 * 6]
 151        stp     x14, x15, [sp, #16 * 7]
 152        stp     x16, x17, [sp, #16 * 8]
 153        stp     x18, x19, [sp, #16 * 9]
 154        stp     x20, x21, [sp, #16 * 10]
 155        stp     x22, x23, [sp, #16 * 11]
 156        stp     x24, x25, [sp, #16 * 12]
 157        stp     x26, x27, [sp, #16 * 13]
 158        stp     x28, x29, [sp, #16 * 14]
 159
 160        .if     \el == 0
 161        mrs     x21, sp_el0
 162        ldr_this_cpu    tsk, __entry_task, x20  // Ensure MDSCR_EL1.SS is clear,
 163        ldr     x19, [tsk, #TSK_TI_FLAGS]       // since we can unmask debug
 164        disable_step_tsk x19, x20               // exceptions when scheduling.
 165
  166        mov     x29, xzr                        // fp pointed to user-space, so clear it
 167        .else
 168        add     x21, sp, #S_FRAME_SIZE
 169        get_thread_info tsk
 170        /* Save the task's original addr_limit and set USER_DS */
 171        ldr     x20, [tsk, #TSK_TI_ADDR_LIMIT]
 172        str     x20, [sp, #S_ORIG_ADDR_LIMIT]
 173        mov     x20, #USER_DS
 174        str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
 175        /* No need to reset PSTATE.UAO, hardware's already set it to 0 for us */
 176        .endif /* \el == 0 */
 177        mrs     x22, elr_el1
 178        mrs     x23, spsr_el1
 179        stp     lr, x21, [sp, #S_LR]
 180
 181        /*
 182         * In order to be able to dump the contents of struct pt_regs at the
 183         * time the exception was taken (in case we attempt to walk the call
 184         * stack later), chain it together with the stack frames.
 185         */
 186        .if \el == 0
 187        stp     xzr, xzr, [sp, #S_STACKFRAME]
 188        .else
 189        stp     x29, x22, [sp, #S_STACKFRAME]
 190        .endif
 191        add     x29, sp, #S_STACKFRAME
 192
 193#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 194        /*
 195         * Set the TTBR0 PAN bit in SPSR. When the exception is taken from
 196         * EL0, there is no need to check the state of TTBR0_EL1 since
 197         * accesses are always enabled.
 198         * Note that the meaning of this bit differs from the ARMv8.1 PAN
 199         * feature as all TTBR0_EL1 accesses are disabled, not just those to
 200         * user mappings.
 201         */
 202alternative_if ARM64_HAS_PAN
 203        b       1f                              // skip TTBR0 PAN
 204alternative_else_nop_endif
 205
 206        .if     \el != 0
 207        mrs     x21, ttbr0_el1
 208        tst     x21, #TTBR_ASID_MASK            // Check for the reserved ASID
 209        orr     x23, x23, #PSR_PAN_BIT          // Set the emulated PAN in the saved SPSR
 210        b.eq    1f                              // TTBR0 access already disabled
 211        and     x23, x23, #~PSR_PAN_BIT         // Clear the emulated PAN in the saved SPSR
 212        .endif
 213
 214        __uaccess_ttbr0_disable x21
 2151:
 216#endif
 217
 218        stp     x22, x23, [sp, #S_PC]
 219
 220        /* Not in a syscall by default (el0_svc overwrites for real syscall) */
 221        .if     \el == 0
 222        mov     w21, #NO_SYSCALL
 223        str     w21, [sp, #S_SYSCALLNO]
 224        .endif
 225
 226        /*
 227         * Set sp_el0 to current thread_info.
 228         */
 229        .if     \el == 0
 230        msr     sp_el0, tsk
 231        .endif
 232
 233        /*
 234         * Registers that may be useful after this macro is invoked:
 235         *
 236         * x21 - aborted SP
 237         * x22 - aborted PC
 238         * x23 - aborted PSTATE
  239         */
 240        .endm
 241
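     /*
      * kernel_exit undoes kernel_entry: it restores the saved ELR/SPSR and GPRs
      * from pt_regs and returns with eret, or, for EL0 returns with kernel
      * unmapping enabled, via the tramp_exit_* trampoline vectors.
      */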
 242        .macro  kernel_exit, el
 243        .if     \el != 0
 244        disable_daif
 245
 246        /* Restore the task's original addr_limit. */
 247        ldr     x20, [sp, #S_ORIG_ADDR_LIMIT]
 248        str     x20, [tsk, #TSK_TI_ADDR_LIMIT]
 249
 250        /* No need to restore UAO, it will be restored from SPSR_EL1 */
 251        .endif
 252
 253        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
 254        .if     \el == 0
 255        ct_user_enter
 256        .endif
 257
 258#ifdef CONFIG_ARM64_SW_TTBR0_PAN
 259        /*
 260         * Restore access to TTBR0_EL1. If returning to EL0, no need for SPSR
 261         * PAN bit checking.
 262         */
 263alternative_if ARM64_HAS_PAN
 264        b       2f                              // skip TTBR0 PAN
 265alternative_else_nop_endif
 266
 267        .if     \el != 0
 268        tbnz    x22, #22, 1f                    // Skip re-enabling TTBR0 access if the PSR_PAN_BIT is set
 269        .endif
 270
 271        __uaccess_ttbr0_enable x0, x1
 272
 273        .if     \el == 0
 274        /*
 275         * Enable errata workarounds only if returning to user. The only
  276         * workaround currently required for TTBR0_EL1 changes is for the
 277         * Cavium erratum 27456 (broadcast TLBI instructions may cause I-cache
 278         * corruption).
 279         */
 280        bl      post_ttbr_update_workaround
 281        .endif
 2821:
 283        .if     \el != 0
 284        and     x22, x22, #~PSR_PAN_BIT         // ARMv8.0 CPUs do not understand this bit
 285        .endif
 2862:
 287#endif
 288
 289        .if     \el == 0
 290        ldr     x23, [sp, #S_SP]                // load return stack pointer
 291        msr     sp_el0, x23
 292        tst     x22, #PSR_MODE32_BIT            // native task?
 293        b.eq    3f
 294
 295#ifdef CONFIG_ARM64_ERRATUM_845719
 296alternative_if ARM64_WORKAROUND_845719
 297#ifdef CONFIG_PID_IN_CONTEXTIDR
 298        mrs     x29, contextidr_el1
 299        msr     contextidr_el1, x29
 300#else
  301        msr     contextidr_el1, xzr
 302#endif
 303alternative_else_nop_endif
 304#endif
 3053:
 306        .endif
 307
 308        msr     elr_el1, x21                    // set up the return data
 309        msr     spsr_el1, x22
 310        ldp     x0, x1, [sp, #16 * 0]
 311        ldp     x2, x3, [sp, #16 * 1]
 312        ldp     x4, x5, [sp, #16 * 2]
 313        ldp     x6, x7, [sp, #16 * 3]
 314        ldp     x8, x9, [sp, #16 * 4]
 315        ldp     x10, x11, [sp, #16 * 5]
 316        ldp     x12, x13, [sp, #16 * 6]
 317        ldp     x14, x15, [sp, #16 * 7]
 318        ldp     x16, x17, [sp, #16 * 8]
 319        ldp     x18, x19, [sp, #16 * 9]
 320        ldp     x20, x21, [sp, #16 * 10]
 321        ldp     x22, x23, [sp, #16 * 11]
 322        ldp     x24, x25, [sp, #16 * 12]
 323        ldp     x26, x27, [sp, #16 * 13]
 324        ldp     x28, x29, [sp, #16 * 14]
 325        ldr     lr, [sp, #S_LR]
 326        add     sp, sp, #S_FRAME_SIZE           // restore sp
 327        /*
  328         * ARCH_HAS_MEMBARRIER_SYNC_CORE relies on eret context synchronization
  329         * when returning from an IPI handler and when returning to user-space.
 330         */
 331
 332        .if     \el == 0
 333alternative_insn eret, nop, ARM64_UNMAP_KERNEL_AT_EL0
 334#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
 335        bne     4f
 336        msr     far_el1, x30
 337        tramp_alias     x30, tramp_exit_native
 338        br      x30
 3394:
 340        tramp_alias     x30, tramp_exit_compat
 341        br      x30
 342#endif
 343        .else
 344        eret
 345        .endif
 346        .endm
 347
 348        .macro  irq_stack_entry
 349        mov     x19, sp                 // preserve the original sp
 350
 351        /*
 352         * Compare sp with the base of the task stack.
 353         * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
 354         * and should switch to the irq stack.
 355         */
 356        ldr     x25, [tsk, TSK_STACK]
 357        eor     x25, x25, x19
 358        and     x25, x25, #~(THREAD_SIZE - 1)
 359        cbnz    x25, 9998f
 360
 361        ldr_this_cpu x25, irq_stack_ptr, x26
 362        mov     x26, #IRQ_STACK_SIZE
 363        add     x26, x25, x26
 364
 365        /* switch to the irq stack */
 366        mov     sp, x26
 3679998:
 368        .endm
 369
 370        /*
 371         * x19 should be preserved between irq_stack_entry and
 372         * irq_stack_exit.
 373         */
 374        .macro  irq_stack_exit
 375        mov     sp, x19
 376        .endm
 377
 378/*
 379 * These are the registers used in the syscall handler, and allow us to
 380 * have in theory up to 7 arguments to a function - x0 to x6.
 381 *
 382 * x7 is reserved for the system call number in 32-bit mode.
 383 */
 384wsc_nr  .req    w25             // number of system calls
 385xsc_nr  .req    x25             // number of system calls (zero-extended)
 386wscno   .req    w26             // syscall number
 387xscno   .req    x26             // syscall number (zero-extended)
 388stbl    .req    x27             // syscall table pointer
 389tsk     .req    x28             // current thread_info
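     /*
      * tsk stays live across kernel_entry/kernel_exit. With THREAD_INFO_IN_TASK,
      * thread_info is the first member of task_struct, so tsk is also the
      * current task pointer (mirrored in sp_el0 while running in the kernel).
      */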
 390
  391/*
  392 * Interrupt handling: call handle_arch_irq on the per-CPU IRQ stack.
  393 */
 394        .macro  irq_handler
 395        ldr_l   x1, handle_arch_irq
 396        mov     x0, sp
 397        irq_stack_entry
 398        blr     x1
 399        irq_stack_exit
 400        .endm
 401
 402        .text
 403
 404/*
 405 * Exception vectors.
 406 */
 407        .pushsection ".entry.text", "ax"
 408
 409        .align  11
 410ENTRY(vectors)
 411        kernel_ventry   1, sync_invalid                 // Synchronous EL1t
 412        kernel_ventry   1, irq_invalid                  // IRQ EL1t
 413        kernel_ventry   1, fiq_invalid                  // FIQ EL1t
 414        kernel_ventry   1, error_invalid                // Error EL1t
 415
 416        kernel_ventry   1, sync                         // Synchronous EL1h
 417        kernel_ventry   1, irq                          // IRQ EL1h
 418        kernel_ventry   1, fiq_invalid                  // FIQ EL1h
 419        kernel_ventry   1, error                        // Error EL1h
 420
 421        kernel_ventry   0, sync                         // Synchronous 64-bit EL0
 422        kernel_ventry   0, irq                          // IRQ 64-bit EL0
 423        kernel_ventry   0, fiq_invalid                  // FIQ 64-bit EL0
 424        kernel_ventry   0, error                        // Error 64-bit EL0
 425
 426#ifdef CONFIG_COMPAT
 427        kernel_ventry   0, sync_compat, 32              // Synchronous 32-bit EL0
 428        kernel_ventry   0, irq_compat, 32               // IRQ 32-bit EL0
 429        kernel_ventry   0, fiq_invalid_compat, 32       // FIQ 32-bit EL0
 430        kernel_ventry   0, error_compat, 32             // Error 32-bit EL0
 431#else
 432        kernel_ventry   0, sync_invalid, 32             // Synchronous 32-bit EL0
 433        kernel_ventry   0, irq_invalid, 32              // IRQ 32-bit EL0
 434        kernel_ventry   0, fiq_invalid, 32              // FIQ 32-bit EL0
 435        kernel_ventry   0, error_invalid, 32            // Error 32-bit EL0
 436#endif
 437END(vectors)
 438
 439#ifdef CONFIG_VMAP_STACK
 440        /*
 441         * We detected an overflow in kernel_ventry, which switched to the
 442         * overflow stack. Stash the exception regs, and head to our overflow
 443         * handler.
 444         */
 445__bad_stack:
 446        /* Restore the original x0 value */
 447        mrs     x0, tpidrro_el0
 448
 449        /*
  450         * Store the original GPRs to the new stack. The original SP (minus
 451         * S_FRAME_SIZE) was stashed in tpidr_el0 by kernel_ventry.
 452         */
 453        sub     sp, sp, #S_FRAME_SIZE
 454        kernel_entry 1
 455        mrs     x0, tpidr_el0
 456        add     x0, x0, #S_FRAME_SIZE
 457        str     x0, [sp, #S_SP]
 458
 459        /* Stash the regs for handle_bad_stack */
 460        mov     x0, sp
 461
 462        /* Time to die */
 463        bl      handle_bad_stack
 464        ASM_BUG()
 465#endif /* CONFIG_VMAP_STACK */
 466
 467/*
 468 * Invalid mode handlers
 469 */
 470        .macro  inv_entry, el, reason, regsize = 64
 471        kernel_entry \el, \regsize
 472        mov     x0, sp
 473        mov     x1, #\reason
 474        mrs     x2, esr_el1
 475        bl      bad_mode
 476        ASM_BUG()
 477        .endm
 478
 479el0_sync_invalid:
 480        inv_entry 0, BAD_SYNC
 481ENDPROC(el0_sync_invalid)
 482
 483el0_irq_invalid:
 484        inv_entry 0, BAD_IRQ
 485ENDPROC(el0_irq_invalid)
 486
 487el0_fiq_invalid:
 488        inv_entry 0, BAD_FIQ
 489ENDPROC(el0_fiq_invalid)
 490
 491el0_error_invalid:
 492        inv_entry 0, BAD_ERROR
 493ENDPROC(el0_error_invalid)
 494
 495#ifdef CONFIG_COMPAT
 496el0_fiq_invalid_compat:
 497        inv_entry 0, BAD_FIQ, 32
 498ENDPROC(el0_fiq_invalid_compat)
 499#endif
 500
 501el1_sync_invalid:
 502        inv_entry 1, BAD_SYNC
 503ENDPROC(el1_sync_invalid)
 504
 505el1_irq_invalid:
 506        inv_entry 1, BAD_IRQ
 507ENDPROC(el1_irq_invalid)
 508
 509el1_fiq_invalid:
 510        inv_entry 1, BAD_FIQ
 511ENDPROC(el1_fiq_invalid)
 512
 513el1_error_invalid:
 514        inv_entry 1, BAD_ERROR
 515ENDPROC(el1_error_invalid)
 516
 517/*
 518 * EL1 mode handlers.
 519 */
 520        .align  6
 521el1_sync:
 522        kernel_entry 1
 523        mrs     x1, esr_el1                     // read the syndrome register
 524        lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
 525        cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
 526        b.eq    el1_da
 527        cmp     x24, #ESR_ELx_EC_IABT_CUR       // instruction abort in EL1
 528        b.eq    el1_ia
 529        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
 530        b.eq    el1_undef
 531        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
 532        b.eq    el1_sp_pc
 533        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
 534        b.eq    el1_sp_pc
 535        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL1
 536        b.eq    el1_undef
 537        cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
 538        b.ge    el1_dbg
 539        b       el1_inv
 540
 541el1_ia:
 542        /*
 543         * Fall through to the Data abort case
 544         */
 545el1_da:
 546        /*
 547         * Data abort handling
 548         */
 549        mrs     x3, far_el1
 550        inherit_daif    pstate=x23, tmp=x2
 551        clear_address_tag x0, x3
 552        mov     x2, sp                          // struct pt_regs
 553        bl      do_mem_abort
 554
 555        kernel_exit 1
 556el1_sp_pc:
 557        /*
 558         * Stack or PC alignment exception handling
 559         */
 560        mrs     x0, far_el1
 561        inherit_daif    pstate=x23, tmp=x2
 562        mov     x2, sp
 563        bl      do_sp_pc_abort
 564        ASM_BUG()
 565el1_undef:
 566        /*
 567         * Undefined instruction
 568         */
 569        inherit_daif    pstate=x23, tmp=x2
 570        mov     x0, sp
 571        bl      do_undefinstr
 572        ASM_BUG()
 573el1_dbg:
 574        /*
 575         * Debug exception handling
 576         */
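             /*
              * Debug ECs taken from the current EL (ESR_ELx_EC_*_CUR) are odd,
              * while their EL0 counterparts are even; BRK64 is even too, so the
              * cinc folds it into the odd set and a single bit-0 test rejects
              * exception classes that came from EL0.
              */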
 577        cmp     x24, #ESR_ELx_EC_BRK64          // if BRK64
 578        cinc    x24, x24, eq                    // set bit '0'
 579        tbz     x24, #0, el1_inv                // EL1 only
 580        mrs     x0, far_el1
 581        mov     x2, sp                          // struct pt_regs
 582        bl      do_debug_exception
 583        kernel_exit 1
 584el1_inv:
 585        // TODO: add support for undefined instructions in kernel mode
 586        inherit_daif    pstate=x23, tmp=x2
 587        mov     x0, sp
 588        mov     x2, x1
 589        mov     x1, #BAD_SYNC
 590        bl      bad_mode
 591        ASM_BUG()
 592ENDPROC(el1_sync)
 593
 594        .align  6
 595el1_irq:
 596        kernel_entry 1
 597        enable_da_f
 598#ifdef CONFIG_TRACE_IRQFLAGS
 599        bl      trace_hardirqs_off
 600#endif
 601
 602        irq_handler
 603
 604#ifdef CONFIG_PREEMPT
 605        ldr     w24, [tsk, #TSK_TI_PREEMPT]     // get preempt count
 606        cbnz    w24, 1f                         // preempt count != 0
 607        ldr     x0, [tsk, #TSK_TI_FLAGS]        // get flags
 608        tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
 609        bl      el1_preempt
 6101:
 611#endif
 612#ifdef CONFIG_TRACE_IRQFLAGS
 613        bl      trace_hardirqs_on
 614#endif
 615        kernel_exit 1
 616ENDPROC(el1_irq)
 617
 618#ifdef CONFIG_PREEMPT
 619el1_preempt:
 620        mov     x24, lr
 6211:      bl      preempt_schedule_irq            // irq en/disable is done inside
  622        ldr     x0, [tsk, #TSK_TI_FLAGS]        // get the new task's TI_FLAGS
 623        tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
 624        ret     x24
 625#endif
 626
 627/*
 628 * EL0 mode handlers.
 629 */
 630        .align  6
 631el0_sync:
 632        kernel_entry 0
 633        mrs     x25, esr_el1                    // read the syndrome register
 634        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
 635        cmp     x24, #ESR_ELx_EC_SVC64          // SVC in 64-bit state
 636        b.eq    el0_svc
 637        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
 638        b.eq    el0_da
 639        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
 640        b.eq    el0_ia
 641        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
 642        b.eq    el0_fpsimd_acc
 643        cmp     x24, #ESR_ELx_EC_SVE            // SVE access
 644        b.eq    el0_sve_acc
 645        cmp     x24, #ESR_ELx_EC_FP_EXC64       // FP/ASIMD exception
 646        b.eq    el0_fpsimd_exc
 647        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
 648        b.eq    el0_sys
 649        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
 650        b.eq    el0_sp_pc
 651        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
 652        b.eq    el0_sp_pc
 653        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
 654        b.eq    el0_undef
 655        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
 656        b.ge    el0_dbg
 657        b       el0_inv
 658
 659#ifdef CONFIG_COMPAT
 660        .align  6
 661el0_sync_compat:
 662        kernel_entry 0, 32
 663        mrs     x25, esr_el1                    // read the syndrome register
 664        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
 665        cmp     x24, #ESR_ELx_EC_SVC32          // SVC in 32-bit state
 666        b.eq    el0_svc_compat
 667        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
 668        b.eq    el0_da
 669        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
 670        b.eq    el0_ia
 671        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
 672        b.eq    el0_fpsimd_acc
 673        cmp     x24, #ESR_ELx_EC_FP_EXC32       // FP/ASIMD exception
 674        b.eq    el0_fpsimd_exc
 675        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
 676        b.eq    el0_sp_pc
 677        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
 678        b.eq    el0_undef
 679        cmp     x24, #ESR_ELx_EC_CP15_32        // CP15 MRC/MCR trap
 680        b.eq    el0_undef
 681        cmp     x24, #ESR_ELx_EC_CP15_64        // CP15 MRRC/MCRR trap
 682        b.eq    el0_undef
 683        cmp     x24, #ESR_ELx_EC_CP14_MR        // CP14 MRC/MCR trap
 684        b.eq    el0_undef
 685        cmp     x24, #ESR_ELx_EC_CP14_LS        // CP14 LDC/STC trap
 686        b.eq    el0_undef
 687        cmp     x24, #ESR_ELx_EC_CP14_64        // CP14 MRRC/MCRR trap
 688        b.eq    el0_undef
 689        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
 690        b.ge    el0_dbg
 691        b       el0_inv
 692el0_svc_compat:
 693        /*
 694         * AArch32 syscall handling
 695         */
 696        ldr     x16, [tsk, #TSK_TI_FLAGS]       // load thread flags
 697        adrp    stbl, compat_sys_call_table     // load compat syscall table pointer
 698        mov     wscno, w7                       // syscall number in w7 (r7)
 699        mov     wsc_nr, #__NR_compat_syscalls
 700        b       el0_svc_naked
 701
 702        .align  6
 703el0_irq_compat:
 704        kernel_entry 0, 32
 705        b       el0_irq_naked
 706
 707el0_error_compat:
 708        kernel_entry 0, 32
 709        b       el0_error_naked
 710#endif
 711
 712el0_da:
 713        /*
 714         * Data abort handling
 715         */
 716        mrs     x26, far_el1
 717        enable_daif
 718        ct_user_exit
 719        clear_address_tag x0, x26
 720        mov     x1, x25
 721        mov     x2, sp
 722        bl      do_mem_abort
 723        b       ret_to_user
 724el0_ia:
 725        /*
 726         * Instruction abort handling
 727         */
 728        mrs     x26, far_el1
 729        enable_da_f
 730#ifdef CONFIG_TRACE_IRQFLAGS
 731        bl      trace_hardirqs_off
 732#endif
 733        ct_user_exit
 734        mov     x0, x26
 735        mov     x1, x25
 736        mov     x2, sp
 737        bl      do_el0_ia_bp_hardening
 738        b       ret_to_user
 739el0_fpsimd_acc:
 740        /*
 741         * Floating Point or Advanced SIMD access
 742         */
 743        enable_daif
 744        ct_user_exit
 745        mov     x0, x25
 746        mov     x1, sp
 747        bl      do_fpsimd_acc
 748        b       ret_to_user
 749el0_sve_acc:
 750        /*
 751         * Scalable Vector Extension access
 752         */
 753        enable_daif
 754        ct_user_exit
 755        mov     x0, x25
 756        mov     x1, sp
 757        bl      do_sve_acc
 758        b       ret_to_user
 759el0_fpsimd_exc:
 760        /*
 761         * Floating Point, Advanced SIMD or SVE exception
 762         */
 763        enable_daif
 764        ct_user_exit
 765        mov     x0, x25
 766        mov     x1, sp
 767        bl      do_fpsimd_exc
 768        b       ret_to_user
 769el0_sp_pc:
 770        /*
 771         * Stack or PC alignment exception handling
 772         */
 773        mrs     x26, far_el1
 774        enable_da_f
 775#ifdef CONFIG_TRACE_IRQFLAGS
 776        bl      trace_hardirqs_off
 777#endif
 778        ct_user_exit
 779        mov     x0, x26
 780        mov     x1, x25
 781        mov     x2, sp
 782        bl      do_sp_pc_abort
 783        b       ret_to_user
 784el0_undef:
 785        /*
 786         * Undefined instruction
 787         */
 788        enable_daif
 789        ct_user_exit
 790        mov     x0, sp
 791        bl      do_undefinstr
 792        b       ret_to_user
 793el0_sys:
 794        /*
 795         * System instructions, for trapped cache maintenance instructions
 796         */
 797        enable_daif
 798        ct_user_exit
 799        mov     x0, x25
 800        mov     x1, sp
 801        bl      do_sysinstr
 802        b       ret_to_user
 803el0_dbg:
 804        /*
 805         * Debug exception handling
 806         */
 807        tbnz    x24, #0, el0_inv                // EL0 only
 808        mrs     x0, far_el1
 809        mov     x1, x25
 810        mov     x2, sp
 811        bl      do_debug_exception
 812        enable_daif
 813        ct_user_exit
 814        b       ret_to_user
 815el0_inv:
 816        enable_daif
 817        ct_user_exit
 818        mov     x0, sp
 819        mov     x1, #BAD_SYNC
 820        mov     x2, x25
 821        bl      bad_el0_sync
 822        b       ret_to_user
 823ENDPROC(el0_sync)
 824
 825        .align  6
 826el0_irq:
 827        kernel_entry 0
 828el0_irq_naked:
 829        enable_da_f
 830#ifdef CONFIG_TRACE_IRQFLAGS
 831        bl      trace_hardirqs_off
 832#endif
 833
 834        ct_user_exit
 835#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR
  836        tbz     x22, #55, 1f                    // skip unless the EL0 PC looks like a kernel address
 837        bl      do_el0_irq_bp_hardening
 8381:
 839#endif
 840        irq_handler
 841
 842#ifdef CONFIG_TRACE_IRQFLAGS
 843        bl      trace_hardirqs_on
 844#endif
 845        b       ret_to_user
 846ENDPROC(el0_irq)
 847
 848el1_error:
 849        kernel_entry 1
 850        mrs     x1, esr_el1
 851        enable_dbg
 852        mov     x0, sp
 853        bl      do_serror
 854        kernel_exit 1
 855ENDPROC(el1_error)
 856
 857el0_error:
 858        kernel_entry 0
 859el0_error_naked:
 860        mrs     x1, esr_el1
 861        enable_dbg
 862        mov     x0, sp
 863        bl      do_serror
 864        enable_daif
 865        ct_user_exit
 866        b       ret_to_user
 867ENDPROC(el0_error)
 868
 869
 870/*
 871 * This is the fast syscall return path.  We do as little as possible here,
 872 * and this includes saving x0 back into the kernel stack.
 873 */
 874ret_fast_syscall:
 875        disable_daif
 876        str     x0, [sp, #S_X0]                 // returned x0
 877        ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for syscall tracing
 878        and     x2, x1, #_TIF_SYSCALL_WORK
 879        cbnz    x2, ret_fast_syscall_trace
 880        and     x2, x1, #_TIF_WORK_MASK
 881        cbnz    x2, work_pending
 882        enable_step_tsk x1, x2
 883        kernel_exit 0
 884ret_fast_syscall_trace:
 885        enable_daif
 886        b       __sys_trace_return_skipped      // we already saved x0
 887
 888/*
 889 * Ok, we need to do extra processing, enter the slow path.
 890 */
 891work_pending:
 892        mov     x0, sp                          // 'regs'
 893        bl      do_notify_resume
 894#ifdef CONFIG_TRACE_IRQFLAGS
 895        bl      trace_hardirqs_on               // enabled while in userspace
 896#endif
 897        ldr     x1, [tsk, #TSK_TI_FLAGS]        // re-check for single-step
 898        b       finish_ret_to_user
 899/*
 900 * "slow" syscall return path.
 901 */
 902ret_to_user:
 903        disable_daif
 904        ldr     x1, [tsk, #TSK_TI_FLAGS]
 905        and     x2, x1, #_TIF_WORK_MASK
 906        cbnz    x2, work_pending
 907finish_ret_to_user:
 908        enable_step_tsk x1, x2
 909        kernel_exit 0
 910ENDPROC(ret_to_user)
 911
 912/*
  913 * SVC handler: AArch64 syscalls enter here with the syscall number in w8.
 914 */
 915        .align  6
 916el0_svc:
 917        ldr     x16, [tsk, #TSK_TI_FLAGS]       // load thread flags
 918        adrp    stbl, sys_call_table            // load syscall table pointer
 919        mov     wscno, w8                       // syscall number in w8
 920        mov     wsc_nr, #__NR_syscalls
 921
 922#ifdef CONFIG_ARM64_SVE
 923alternative_if_not ARM64_SVE
 924        b       el0_svc_naked
 925alternative_else_nop_endif
 926        tbz     x16, #TIF_SVE, el0_svc_naked    // Skip unless TIF_SVE set:
 927        bic     x16, x16, #_TIF_SVE             // discard SVE state
 928        str     x16, [tsk, #TSK_TI_FLAGS]
 929
 930        /*
 931         * task_fpsimd_load() won't be called to update CPACR_EL1 in
 932         * ret_to_user unless TIF_FOREIGN_FPSTATE is still set, which only
 933         * happens if a context switch or kernel_neon_begin() or context
 934         * modification (sigreturn, ptrace) intervenes.
 935         * So, ensure that CPACR_EL1 is already correct for the fast-path case:
 936         */
 937        mrs     x9, cpacr_el1
 938        bic     x9, x9, #CPACR_EL1_ZEN_EL0EN    // disable SVE for el0
 939        msr     cpacr_el1, x9                   // synchronised by eret to el0
 940#endif
 941
 942el0_svc_naked:                                  // compat entry point
 943        stp     x0, xscno, [sp, #S_ORIG_X0]     // save the original x0 and syscall number
 944        enable_daif
 945        ct_user_exit 1
 946
 947        tst     x16, #_TIF_SYSCALL_WORK         // check for syscall hooks
 948        b.ne    __sys_trace
 949        cmp     wscno, wsc_nr                   // check upper syscall limit
 950        b.hs    ni_sys
 951        mask_nospec64 xscno, xsc_nr, x19        // enforce bounds for syscall number
 952        ldr     x16, [stbl, xscno, lsl #3]      // address in the syscall table
 953        blr     x16                             // call sys_* routine
 954        b       ret_fast_syscall
 955ni_sys:
 956        mov     x0, sp
 957        bl      do_ni_syscall
 958        b       ret_fast_syscall
 959ENDPROC(el0_svc)
 960
 961        /*
 962         * This is the really slow path.  We're going to be doing context
 963         * switches, and waiting for our parent to respond.
 964         */
 965__sys_trace:
 966        cmp     wscno, #NO_SYSCALL              // user-issued syscall(-1)?
 967        b.ne    1f
 968        mov     x0, #-ENOSYS                    // set default errno if so
 969        str     x0, [sp, #S_X0]
 9701:      mov     x0, sp
 971        bl      syscall_trace_enter
 972        cmp     w0, #NO_SYSCALL                 // skip the syscall?
 973        b.eq    __sys_trace_return_skipped
 974        mov     wscno, w0                       // syscall number (possibly new)
 975        mov     x1, sp                          // pointer to regs
 976        cmp     wscno, wsc_nr                   // check upper syscall limit
 977        b.hs    __ni_sys_trace
 978        ldp     x0, x1, [sp]                    // restore the syscall args
 979        ldp     x2, x3, [sp, #S_X2]
 980        ldp     x4, x5, [sp, #S_X4]
 981        ldp     x6, x7, [sp, #S_X6]
 982        ldr     x16, [stbl, xscno, lsl #3]      // address in the syscall table
 983        blr     x16                             // call sys_* routine
 984
 985__sys_trace_return:
 986        str     x0, [sp, #S_X0]                 // save returned x0
 987__sys_trace_return_skipped:
 988        mov     x0, sp
 989        bl      syscall_trace_exit
 990        b       ret_to_user
 991
 992__ni_sys_trace:
 993        mov     x0, sp
 994        bl      do_ni_syscall
 995        b       __sys_trace_return
 996
 997        .popsection                             // .entry.text
 998
 999#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1000/*
1001 * Exception vectors trampoline.
1002 */
1003        .pushsection ".entry.tramp.text", "ax"
1004
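     /*
      * Switch TTBR1_EL1 from the trampoline page tables to swapper_pg_dir and
      * to the kernel ASID. This relies on the linker script placing tramp_pg_dir
      * PAGE_SIZE + RESERVED_TTBR0_SIZE bytes below swapper_pg_dir.
      */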
1005        .macro tramp_map_kernel, tmp
1006        mrs     \tmp, ttbr1_el1
1007        add     \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1008        bic     \tmp, \tmp, #USER_ASID_FLAG
1009        msr     ttbr1_el1, \tmp
1010#ifdef CONFIG_QCOM_FALKOR_ERRATUM_1003
1011alternative_if ARM64_WORKAROUND_QCOM_FALKOR_E1003
1012        /* ASID already in \tmp[63:48] */
1013        movk    \tmp, #:abs_g2_nc:(TRAMP_VALIAS >> 12)
1014        movk    \tmp, #:abs_g1_nc:(TRAMP_VALIAS >> 12)
1015        /* 2MB boundary containing the vectors, so we nobble the walk cache */
1016        movk    \tmp, #:abs_g0_nc:((TRAMP_VALIAS & ~(SZ_2M - 1)) >> 12)
1017        isb
1018        tlbi    vae1, \tmp
1019        dsb     nsh
1020alternative_else_nop_endif
1021#endif /* CONFIG_QCOM_FALKOR_ERRATUM_1003 */
1022        .endm
1023
1024        .macro tramp_unmap_kernel, tmp
1025        mrs     \tmp, ttbr1_el1
1026        sub     \tmp, \tmp, #(PAGE_SIZE + RESERVED_TTBR0_SIZE)
1027        orr     \tmp, \tmp, #USER_ASID_FLAG
1028        msr     ttbr1_el1, \tmp
1029        /*
1030         * We avoid running the post_ttbr_update_workaround here because
1031         * it's only needed by Cavium ThunderX, which requires KPTI to be
1032         * disabled.
1033         */
1034        .endm
1035
1036        .macro tramp_ventry, regsize = 64
1037        .align  7
10381:
1039        .if     \regsize == 64
1040        msr     tpidrro_el0, x30        // Restored in kernel_ventry
1041        .endif
1042        /*
1043         * Defend against branch aliasing attacks by pushing a dummy
1044         * entry onto the return stack and using a RET instruction to
1045         * enter the full-fat kernel vectors.
1046         */
1047        bl      2f
1048        b       .
10492:
1050        tramp_map_kernel        x30
1051#ifdef CONFIG_RANDOMIZE_BASE
1052        adr     x30, tramp_vectors + PAGE_SIZE
1053alternative_insn isb, nop, ARM64_WORKAROUND_QCOM_FALKOR_E1003
1054        ldr     x30, [x30]
1055#else
1056        ldr     x30, =vectors
1057#endif
1058        prfm    plil1strm, [x30, #(1b - tramp_vectors)]
1059        msr     vbar_el1, x30
1060        add     x30, x30, #(1b - tramp_vectors)
1061        isb
1062        ret
1063        .endm
1064
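     /*
      * Leave the kernel via the trampoline: point VBAR_EL1 back at the
      * trampoline vectors, switch TTBR1_EL1 back to the user page tables and,
      * for native tasks, recover x30 from FAR_EL1 where kernel_exit stashed it.
      */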
1065        .macro tramp_exit, regsize = 64
1066        adr     x30, tramp_vectors
1067        msr     vbar_el1, x30
1068        tramp_unmap_kernel      x30
1069        .if     \regsize == 64
1070        mrs     x30, far_el1
1071        .endif
1072        eret
1073        .endm
1074
1075        .align  11
1076ENTRY(tramp_vectors)
1077        .space  0x400
1078
1079        tramp_ventry
1080        tramp_ventry
1081        tramp_ventry
1082        tramp_ventry
1083
1084        tramp_ventry    32
1085        tramp_ventry    32
1086        tramp_ventry    32
1087        tramp_ventry    32
1088END(tramp_vectors)
1089
1090ENTRY(tramp_exit_native)
1091        tramp_exit
1092END(tramp_exit_native)
1093
1094ENTRY(tramp_exit_compat)
1095        tramp_exit      32
1096END(tramp_exit_compat)
1097
1098        .ltorg
1099        .popsection                             // .entry.tramp.text
1100#ifdef CONFIG_RANDOMIZE_BASE
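     /*
      * With KASLR, the virtual address of the real vectors is kept in a data
      * page mapped next to the trampoline text, so the randomized address never
      * appears as a literal inside the trampoline itself.
      */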
1101        .pushsection ".rodata", "a"
1102        .align PAGE_SHIFT
1103        .globl  __entry_tramp_data_start
1104__entry_tramp_data_start:
1105        .quad   vectors
1106        .popsection                             // .rodata
1107#endif /* CONFIG_RANDOMIZE_BASE */
1108#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1109
1110/*
1111 * Special system call wrappers.
1112 */
1113ENTRY(sys_rt_sigreturn_wrapper)
1114        mov     x0, sp
1115        b       sys_rt_sigreturn
1116ENDPROC(sys_rt_sigreturn_wrapper)
1117
1118/*
1119 * Register switch for AArch64. The callee-saved registers need to be saved
1120 * and restored. On entry:
1121 *   x0 = previous task_struct (must be preserved across the switch)
1122 *   x1 = next task_struct
1123 * Previous and next are guaranteed not to be the same.
1124 *
1125 */
1126ENTRY(cpu_switch_to)
1127        mov     x10, #THREAD_CPU_CONTEXT
1128        add     x8, x0, x10
1129        mov     x9, sp
1130        stp     x19, x20, [x8], #16             // store callee-saved registers
1131        stp     x21, x22, [x8], #16
1132        stp     x23, x24, [x8], #16
1133        stp     x25, x26, [x8], #16
1134        stp     x27, x28, [x8], #16
1135        stp     x29, x9, [x8], #16
1136        str     lr, [x8]
1137        add     x8, x1, x10
1138        ldp     x19, x20, [x8], #16             // restore callee-saved registers
1139        ldp     x21, x22, [x8], #16
1140        ldp     x23, x24, [x8], #16
1141        ldp     x25, x26, [x8], #16
1142        ldp     x27, x28, [x8], #16
1143        ldp     x29, x9, [x8], #16
1144        ldr     lr, [x8]
1145        mov     sp, x9
 1146        msr     sp_el0, x1                      // sp_el0 holds the current task pointer
1147        ret
1148ENDPROC(cpu_switch_to)
1149NOKPROBE(cpu_switch_to)
1150
 1151/*
 1152 * This is how we return from a fork: copy_thread() leaves a kernel thread's
      * function in x19 (zero for user tasks) and its argument in x20.
 1153 */
1154ENTRY(ret_from_fork)
1155        bl      schedule_tail
1156        cbz     x19, 1f                         // not a kernel thread
1157        mov     x0, x20
1158        blr     x19
11591:      get_thread_info tsk
1160        b       ret_to_user
1161ENDPROC(ret_from_fork)
1162NOKPROBE(ret_from_fork)
1163
1164#ifdef CONFIG_ARM_SDE_INTERFACE
1165
1166#include <asm/sdei.h>
1167#include <uapi/linux/arm_sdei.h>
1168
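     /*
      * Complete the SDEI event by issuing the firmware call via SMC or HVC,
      * depending on \exit_mode (the conduit the SDEI firmware was probed with).
      */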
1169.macro sdei_handler_exit exit_mode
1170        /* On success, this call never returns... */
1171        cmp     \exit_mode, #SDEI_EXIT_SMC
1172        b.ne    99f
1173        smc     #0
1174        b       .
117599:     hvc     #0
1176        b       .
1177.endm
1178
1179#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1180/*
1181 * The regular SDEI entry point may have been unmapped along with the rest of
1182 * the kernel. This trampoline restores the kernel mapping to make the x1 memory
1183 * argument accessible.
1184 *
 1185 * This clobbers x4; __sdei_handler() will restore it from firmware's
1186 * copy.
1187 */
1188.ltorg
1189.pushsection ".entry.tramp.text", "ax"
1190ENTRY(__sdei_asm_entry_trampoline)
1191        mrs     x4, ttbr1_el1
1192        tbz     x4, #USER_ASID_BIT, 1f
1193
1194        tramp_map_kernel tmp=x4
1195        isb
1196        mov     x4, xzr
1197
1198        /*
1199         * Use reg->interrupted_regs.addr_limit to remember whether to unmap
1200         * the kernel on exit.
1201         */
12021:      str     x4, [x1, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1203
1204#ifdef CONFIG_RANDOMIZE_BASE
1205        adr     x4, tramp_vectors + PAGE_SIZE
1206        add     x4, x4, #:lo12:__sdei_asm_trampoline_next_handler
1207        ldr     x4, [x4]
1208#else
1209        ldr     x4, =__sdei_asm_handler
1210#endif
1211        br      x4
1212ENDPROC(__sdei_asm_entry_trampoline)
1213NOKPROBE(__sdei_asm_entry_trampoline)
1214
1215/*
1216 * Make the exit call and restore the original ttbr1_el1
1217 *
1218 * x0 & x1: setup for the exit API call
1219 * x2: exit_mode
1220 * x4: struct sdei_registered_event argument from registration time.
1221 */
1222ENTRY(__sdei_asm_exit_trampoline)
1223        ldr     x4, [x4, #(SDEI_EVENT_INTREGS + S_ORIG_ADDR_LIMIT)]
1224        cbnz    x4, 1f
1225
1226        tramp_unmap_kernel      tmp=x4
1227
12281:      sdei_handler_exit exit_mode=x2
1229ENDPROC(__sdei_asm_exit_trampoline)
1230NOKPROBE(__sdei_asm_exit_trampoline)
1231        .ltorg
1232.popsection             // .entry.tramp.text
1233#ifdef CONFIG_RANDOMIZE_BASE
1234.pushsection ".rodata", "a"
1235__sdei_asm_trampoline_next_handler:
1236        .quad   __sdei_asm_handler
1237.popsection             // .rodata
1238#endif /* CONFIG_RANDOMIZE_BASE */
1239#endif /* CONFIG_UNMAP_KERNEL_AT_EL0 */
1240
1241/*
1242 * Software Delegated Exception entry point.
1243 *
1244 * x0: Event number
1245 * x1: struct sdei_registered_event argument from registration time.
1246 * x2: interrupted PC
1247 * x3: interrupted PSTATE
1248 * x4: maybe clobbered by the trampoline
1249 *
 1250 * Firmware has preserved x0-x17 for us; we must save/restore the rest to
 1251 * follow the SMC calling convention. We save (or retrieve) all the
 1252 * registers as the handler may want them.
1253 */
1254ENTRY(__sdei_asm_handler)
1255        stp     x2, x3, [x1, #SDEI_EVENT_INTREGS + S_PC]
1256        stp     x4, x5, [x1, #SDEI_EVENT_INTREGS + 16 * 2]
1257        stp     x6, x7, [x1, #SDEI_EVENT_INTREGS + 16 * 3]
1258        stp     x8, x9, [x1, #SDEI_EVENT_INTREGS + 16 * 4]
1259        stp     x10, x11, [x1, #SDEI_EVENT_INTREGS + 16 * 5]
1260        stp     x12, x13, [x1, #SDEI_EVENT_INTREGS + 16 * 6]
1261        stp     x14, x15, [x1, #SDEI_EVENT_INTREGS + 16 * 7]
1262        stp     x16, x17, [x1, #SDEI_EVENT_INTREGS + 16 * 8]
1263        stp     x18, x19, [x1, #SDEI_EVENT_INTREGS + 16 * 9]
1264        stp     x20, x21, [x1, #SDEI_EVENT_INTREGS + 16 * 10]
1265        stp     x22, x23, [x1, #SDEI_EVENT_INTREGS + 16 * 11]
1266        stp     x24, x25, [x1, #SDEI_EVENT_INTREGS + 16 * 12]
1267        stp     x26, x27, [x1, #SDEI_EVENT_INTREGS + 16 * 13]
1268        stp     x28, x29, [x1, #SDEI_EVENT_INTREGS + 16 * 14]
1269        mov     x4, sp
1270        stp     lr, x4, [x1, #SDEI_EVENT_INTREGS + S_LR]
1271
1272        mov     x19, x1
1273
1274#ifdef CONFIG_VMAP_STACK
1275        /*
 1276         * entry.S may have been using sp as a scratch register. Find out whether
 1277         * this is a normal or a critical event and switch to the appropriate
 1278         * stack for this CPU.
1279         */
1280        ldrb    w4, [x19, #SDEI_EVENT_PRIORITY]
1281        cbnz    w4, 1f
1282        ldr_this_cpu dst=x5, sym=sdei_stack_normal_ptr, tmp=x6
1283        b       2f
12841:      ldr_this_cpu dst=x5, sym=sdei_stack_critical_ptr, tmp=x6
12852:      mov     x6, #SDEI_STACK_SIZE
1286        add     x5, x5, x6
1287        mov     sp, x5
1288#endif
1289
1290        /*
1291         * We may have interrupted userspace, or a guest, or exit-from or
 1292         * return-to either of these. We can't trust sp_el0, so restore it.
1293         */
1294        mrs     x28, sp_el0
1295        ldr_this_cpu    dst=x0, sym=__entry_task, tmp=x1
1296        msr     sp_el0, x0
1297
 1298        /* If we interrupted the kernel, point to the previous stack/frame. */
1299        and     x0, x3, #0xc
1300        mrs     x1, CurrentEL
1301        cmp     x0, x1
1302        csel    x29, x29, xzr, eq       // fp, or zero
1303        csel    x4, x2, xzr, eq         // elr, or zero
1304
1305        stp     x29, x4, [sp, #-16]!
1306        mov     x29, sp
1307
1308        add     x0, x19, #SDEI_EVENT_INTREGS
1309        mov     x1, x19
1310        bl      __sdei_handler
1311
1312        msr     sp_el0, x28
1313        /* restore regs >x17 that we clobbered */
1314        mov     x4, x19         // keep x4 for __sdei_asm_exit_trampoline
1315        ldp     x28, x29, [x4, #SDEI_EVENT_INTREGS + 16 * 14]
1316        ldp     x18, x19, [x4, #SDEI_EVENT_INTREGS + 16 * 9]
1317        ldp     lr, x1, [x4, #SDEI_EVENT_INTREGS + S_LR]
1318        mov     sp, x1
1319
1320        mov     x1, x0                  // address to complete_and_resume
1321        /* x0 = (x0 <= 1) ? EVENT_COMPLETE:EVENT_COMPLETE_AND_RESUME */
1322        cmp     x0, #1
1323        mov_q   x2, SDEI_1_0_FN_SDEI_EVENT_COMPLETE
1324        mov_q   x3, SDEI_1_0_FN_SDEI_EVENT_COMPLETE_AND_RESUME
1325        csel    x0, x2, x3, ls
1326
1327        ldr_l   x2, sdei_exit_mode
1328
1329alternative_if_not ARM64_UNMAP_KERNEL_AT_EL0
1330        sdei_handler_exit exit_mode=x2
1331alternative_else_nop_endif
1332
1333#ifdef CONFIG_UNMAP_KERNEL_AT_EL0
1334        tramp_alias     dst=x5, sym=__sdei_asm_exit_trampoline
1335        br      x5
1336#endif
1337ENDPROC(__sdei_asm_handler)
1338NOKPROBE(__sdei_asm_handler)
1339#endif /* CONFIG_ARM_SDE_INTERFACE */
1340