linux/arch/arm64/kernel/entry.S
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:     Catalin Marinas <catalin.marinas@arm.com>
 *              Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/alternative-asm.h>
#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/cpufeature.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>

/*
 * Context tracking subsystem.  Used to instrument transitions
 * between user and kernel mode.
 */
        .macro ct_user_exit, syscall = 0
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_exit
        .if \syscall == 1
        /*
         * Save/restore needed during syscalls.  Restore syscall arguments from
         * the values already saved on stack during kernel_entry.
         */
        ldp     x0, x1, [sp]
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        .endif
#endif
        .endm

        .macro ct_user_enter
#ifdef CONFIG_CONTEXT_TRACKING
        bl      context_tracking_user_enter
#endif
        .endm
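
/*
 * For reference: context_tracking_user_exit() and context_tracking_user_enter()
 * are ordinary C functions (kernel/context_tracking.c) with roughly these
 * prototypes:
 *
 *      void context_tracking_user_exit(void);
 *      void context_tracking_user_enter(void);
 *
 * Being C calls, they may clobber the caller-saved registers x0-x18 under the
 * AAPCS64, which is presumably why the syscall variant above reloads the
 * syscall arguments x0-x7 from the pt_regs frame afterwards.
 */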

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC        0
#define BAD_IRQ         1
#define BAD_FIQ         2
#define BAD_ERROR       3

        .macro  kernel_entry, el, regsize = 64
        sub     sp, sp, #S_FRAME_SIZE
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
        .endif
        stp     x0, x1, [sp, #16 * 0]
        stp     x2, x3, [sp, #16 * 1]
        stp     x4, x5, [sp, #16 * 2]
        stp     x6, x7, [sp, #16 * 3]
        stp     x8, x9, [sp, #16 * 4]
        stp     x10, x11, [sp, #16 * 5]
        stp     x12, x13, [sp, #16 * 6]
        stp     x14, x15, [sp, #16 * 7]
        stp     x16, x17, [sp, #16 * 8]
        stp     x18, x19, [sp, #16 * 9]
        stp     x20, x21, [sp, #16 * 10]
        stp     x22, x23, [sp, #16 * 11]
        stp     x24, x25, [sp, #16 * 12]
        stp     x26, x27, [sp, #16 * 13]
        stp     x28, x29, [sp, #16 * 14]

        .if     \el == 0
        mrs     x21, sp_el0
        get_thread_info tsk                     // Ensure MDSCR_EL1.SS is clear,
        ldr     x19, [tsk, #TI_FLAGS]           // since we can unmask debug
        disable_step_tsk x19, x20               // exceptions when scheduling.
        .else
        add     x21, sp, #S_FRAME_SIZE
        .endif
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
        stp     x22, x23, [sp, #S_PC]

        /*
         * Set syscallno to -1 by default (overridden later if real syscall).
         */
        .if     \el == 0
        mvn     x21, xzr
        str     x21, [sp, #S_SYSCALLNO]
        .endif

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm
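
/*
 * For reference: the S_* offsets and S_FRAME_SIZE used above are generated by
 * asm-offsets.c from struct pt_regs.  A rough sketch of the frame that
 * kernel_entry builds on the stack (field order as in
 * arch/arm64/include/asm/ptrace.h; the authoritative offsets come from the
 * generated asm-offsets.h, not from this comment):
 *
 *      struct pt_regs {
 *              u64 regs[31];   // x0..x30   (S_X0 .. S_LR)
 *              u64 sp;         // S_SP
 *              u64 pc;         // S_PC
 *              u64 pstate;     // S_PSTATE
 *              u64 orig_x0;    // S_ORIG_X0
 *              u64 syscallno;  // S_SYSCALLNO
 *      };                      // sizeof() == S_FRAME_SIZE
 */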

        .macro  kernel_exit, el, ret = 0
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ct_user_enter
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        msr     sp_el0, x23

#ifdef CONFIG_ARM64_ERRATUM_845719
        alternative_insn                                                \
        "nop",                                                          \
        "tbz x22, #4, 1f",                                              \
        ARM64_WORKAROUND_845719
#ifdef CONFIG_PID_IN_CONTEXTIDR
        alternative_insn                                                \
        "nop; nop",                                                     \
        "mrs x29, contextidr_el1; msr contextidr_el1, x29; 1:",         \
        ARM64_WORKAROUND_845719
#else
        alternative_insn                                                \
        "nop",                                                          \
        "msr contextidr_el1, xzr; 1:",                                  \
        ARM64_WORKAROUND_845719
#endif
#endif
        .endif
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        .if     \ret
        ldr     x1, [sp, #S_X1]                 // preserve x0 (syscall return)
        .else
        ldp     x0, x1, [sp, #16 * 0]
        .endif
        ldp     x2, x3, [sp, #16 * 1]
        ldp     x4, x5, [sp, #16 * 2]
        ldp     x6, x7, [sp, #16 * 3]
        ldp     x8, x9, [sp, #16 * 4]
        ldp     x10, x11, [sp, #16 * 5]
        ldp     x12, x13, [sp, #16 * 6]
        ldp     x14, x15, [sp, #16 * 7]
        ldp     x16, x17, [sp, #16 * 8]
        ldp     x18, x19, [sp, #16 * 9]
        ldp     x20, x21, [sp, #16 * 10]
        ldp     x22, x23, [sp, #16 * 11]
        ldp     x24, x25, [sp, #16 * 12]
        ldp     x26, x27, [sp, #16 * 13]
        ldp     x28, x29, [sp, #16 * 14]
        ldr     lr, [sp, #S_LR]
        add     sp, sp, #S_FRAME_SIZE           // restore sp
        eret                                    // return to kernel
        .endm

        .macro  get_thread_info, rd
        mov     \rd, sp
        and     \rd, \rd, #~(THREAD_SIZE - 1)   // top of stack
        .endm
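
/*
 * For reference: in this kernel the kernel stack is THREAD_SIZE-aligned and
 * struct thread_info lives at the low end of that allocation, so masking the
 * current sp with ~(THREAD_SIZE - 1) yields the thread_info pointer.
 */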

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
sc_nr   .req    x25             // number of system calls
scno    .req    x26             // syscall number
stbl    .req    x27             // syscall table pointer
tsk     .req    x28             // current thread_info
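
/*
 * For reference: x25-x28 are callee-saved under the AAPCS64, so the values
 * cached in sc_nr/scno/stbl/tsk survive the bl calls into C handlers made
 * below; that is presumably why these particular registers were chosen.
 */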

/*
 * Interrupt handling.
 */
        .macro  irq_handler
        adrp    x1, handle_arch_irq
        ldr     x1, [x1, #:lo12:handle_arch_irq]
        mov     x0, sp
        blr     x1
        .endm
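
/*
 * For reference: handle_arch_irq is a global function pointer installed by
 * the root interrupt controller driver (e.g. the GIC) via set_handle_irq().
 * Its C type is roughly:
 *
 *      void (*handle_arch_irq)(struct pt_regs *);
 *
 * hence x0 = sp passes the pt_regs frame built by kernel_entry to it.
 */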

        .text

/*
 * Exception vectors.
 */

        .align  11
ENTRY(vectors)
        ventry  el1_sync_invalid                // Synchronous EL1t
        ventry  el1_irq_invalid                 // IRQ EL1t
        ventry  el1_fiq_invalid                 // FIQ EL1t
        ventry  el1_error_invalid               // Error EL1t

        ventry  el1_sync                        // Synchronous EL1h
        ventry  el1_irq                         // IRQ EL1h
        ventry  el1_fiq_invalid                 // FIQ EL1h
        ventry  el1_error_invalid               // Error EL1h

        ventry  el0_sync                        // Synchronous 64-bit EL0
        ventry  el0_irq                         // IRQ 64-bit EL0
        ventry  el0_fiq_invalid                 // FIQ 64-bit EL0
        ventry  el0_error_invalid               // Error 64-bit EL0

#ifdef CONFIG_COMPAT
        ventry  el0_sync_compat                 // Synchronous 32-bit EL0
        ventry  el0_irq_compat                  // IRQ 32-bit EL0
        ventry  el0_fiq_invalid_compat          // FIQ 32-bit EL0
        ventry  el0_error_invalid_compat        // Error 32-bit EL0
#else
        ventry  el0_sync_invalid                // Synchronous 32-bit EL0
        ventry  el0_irq_invalid                 // IRQ 32-bit EL0
        ventry  el0_fiq_invalid                 // FIQ 32-bit EL0
        ventry  el0_error_invalid               // Error 32-bit EL0
#endif
END(vectors)
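
/*
 * For reference: the .align 11 gives the 2KB alignment that VBAR_EL1
 * requires, and the ventry helper pads each entry out to 0x80 bytes, the
 * architectural spacing between vector slots.  The four groups above are the
 * ARMv8 ones: current EL using SP_EL0 (EL1t), current EL using SP_EL1 (EL1h),
 * lower EL in AArch64, and lower EL in AArch32.
 */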

/*
 * Invalid mode handlers
 */
        .macro  inv_entry, el, reason, regsize = 64
        kernel_entry el, \regsize
        mov     x0, sp
        mov     x1, #\reason
        mrs     x2, esr_el1
        b       bad_mode
        .endm
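
/*
 * For reference: bad_mode() is the C handler in arch/arm64/kernel/traps.c,
 * with roughly the prototype
 *
 *      asmlinkage void bad_mode(struct pt_regs *regs, int reason,
 *                               unsigned int esr);
 *
 * matching the x0/x1/x2 set up above.
 */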

el0_sync_invalid:
        inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
        inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
        inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
        inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
        inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
        inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
        inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
        inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
        inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
        inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
        .align  6
el1_sync:
        kernel_entry 1
        mrs     x1, esr_el1                     // read the syndrome register
        lsr     x24, x1, #ESR_ELx_EC_SHIFT      // exception class
        cmp     x24, #ESR_ELx_EC_DABT_CUR       // data abort in EL1
        b.eq    el1_da
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL1
        b.eq    el1_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_CUR    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv
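
/*
 * For reference: the exception class (EC) lives in ESR_ELx[31:26], which is
 * what the lsr by #ESR_ELx_EC_SHIFT extracts.  The final b.ge works because
 * the debug-related exception classes are the numerically highest values
 * compared here, so anything at or above BREAKPT_CUR is routed to el1_dbg
 * and vetted there before being treated as a debug exception.
 */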
el1_da:
        /*
         * Data abort handling
         */
        mrs     x0, far_el1
        enable_dbg
        // re-enable interrupts if they were enabled in the aborted context
        tbnz    x23, #7, 1f                     // PSR_I_BIT
        enable_irq
1:
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort

        // disable interrupts before pulling preserved data off the stack
        disable_irq
        kernel_exit 1
el1_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x0, far_el1
        enable_dbg
        mov     x2, sp
        b       do_sp_pc_abort
el1_undef:
        /*
         * Undefined instruction
         */
        enable_dbg
        mov     x0, sp
        b       do_undefinstr
el1_dbg:
        /*
         * Debug exception handling
         */
        cmp     x24, #ESR_ELx_EC_BRK64          // if BRK64
        cinc    x24, x24, eq                    // set bit '0'
        tbz     x24, #0, el1_inv                // EL1 only
        mrs     x0, far_el1
        mov     x2, sp                          // struct pt_regs
        bl      do_debug_exception
        kernel_exit 1
el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        enable_dbg
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mrs     x2, esr_el1
        b       bad_mode
ENDPROC(el1_sync)

        .align  6
el1_irq:
        kernel_entry 1
        enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif

        irq_handler

#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     w24, [tsk, #TI_PREEMPT]         // get preempt count
        cbnz    w24, 1f                         // preempt count != 0
        ldr     x0, [tsk, #TI_FLAGS]            // get flags
        tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
        bl      el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
el1_preempt:
        mov     x24, lr
1:      bl      preempt_schedule_irq            // irq en/disable is done inside
        ldr     x0, [tsk, #TI_FLAGS]            // get new task's TI_FLAGS
        tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
        ret     x24
#endif

/*
 * EL0 mode handlers.
 */
        .align  6
el0_sync:
        kernel_entry 0
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
        cmp     x24, #ESR_ELx_EC_SVC64          // SVC in 64-bit state
        b.eq    el0_svc
        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC64       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_ELx_EC_SYS64          // configurable trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_SP_ALIGN       // stack alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_PC_ALIGN       // pc alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv

#ifdef CONFIG_COMPAT
        .align  6
el0_sync_compat:
        kernel_entry 0, 32
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_ELx_EC_SHIFT     // exception class
        cmp     x24, #ESR_ELx_EC_SVC32          // SVC in 32-bit state
        b.eq    el0_svc_compat
        cmp     x24, #ESR_ELx_EC_DABT_LOW       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_ELx_EC_IABT_LOW       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_ELx_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_ELx_EC_FP_EXC32       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_ELx_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_32        // CP15 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP15_64        // CP15 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_MR        // CP14 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_LS        // CP14 LDC/STC trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_CP14_64        // CP14 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_ELx_EC_BREAKPT_LOW    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv
el0_svc_compat:
        /*
         * AArch32 syscall handling
         */
        adrp    stbl, compat_sys_call_table     // load compat syscall table pointer
        uxtw    scno, w7                        // syscall number in w7 (r7)
        mov     sc_nr, #__NR_compat_syscalls
        b       el0_svc_naked
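
/*
 * For reference: the AArch32 Linux syscall ABI passes the syscall number in
 * r7 (seen here as w7) and the arguments in r0-r6, whereas the AArch64 path
 * below uses w8 for the number; uxtw zero-extends the 32-bit value before it
 * is compared against sc_nr and used to index the table.
 */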

        .align  6
el0_irq_compat:
        kernel_entry 0, 32
        b       el0_irq_naked
#endif

el0_da:
        /*
         * Data abort handling
         */
        mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        ct_user_exit
        bic     x0, x26, #(0xff << 56)
        mov     x1, x25
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user
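
/*
 * For reference: the bic above clears the top byte of the fault address.
 * EL0 may run with tagged pointers (the "top byte ignore" feature), so the
 * tag is presumably stripped before the address is handed to do_mem_abort();
 * x25 still holds the ESR value read in el0_sync/el0_sync_compat.
 */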
el0_ia:
        /*
         * Instruction abort handling
         */
        mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        ct_user_exit
        mov     x0, x26
        orr     x1, x25, #1 << 24               // use reserved ISS bit for instruction aborts
        mov     x2, sp
        bl      do_mem_abort
        b       ret_to_user
el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
        enable_dbg
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_acc
        b       ret_to_user
el0_fpsimd_exc:
        /*
         * Floating Point or Advanced SIMD exception
         */
        enable_dbg
        ct_user_exit
        mov     x0, x25
        mov     x1, sp
        bl      do_fpsimd_exc
        b       ret_to_user
el0_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x26, far_el1
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        mov     x0, x26
        mov     x1, x25
        mov     x2, sp
        bl      do_sp_pc_abort
        b       ret_to_user
el0_undef:
        /*
         * Undefined instruction
         */
        // enable interrupts before calling the main handler
        enable_dbg_and_irq
        ct_user_exit
        mov     x0, sp
        bl      do_undefinstr
        b       ret_to_user
el0_dbg:
        /*
         * Debug exception handling
         */
        tbnz    x24, #0, el0_inv                // EL0 only
        mrs     x0, far_el1
        mov     x1, x25
        mov     x2, sp
        bl      do_debug_exception
        enable_dbg
        ct_user_exit
        b       ret_to_user
el0_inv:
        enable_dbg
        ct_user_exit
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mrs     x2, esr_el1
        bl      bad_mode
        b       ret_to_user
ENDPROC(el0_sync)

        .align  6
el0_irq:
        kernel_entry 0
el0_irq_naked:
        enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif

        ct_user_exit
        irq_handler

#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        b       ret_to_user
ENDPROC(el0_irq)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 *
 */
ENTRY(cpu_switch_to)
        add     x8, x0, #THREAD_CPU_CONTEXT
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
        stp     x23, x24, [x8], #16
        stp     x25, x26, [x8], #16
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
        add     x8, x1, #THREAD_CPU_CONTEXT
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
        ldp     x25, x26, [x8], #16
        ldp     x27, x28, [x8], #16
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
        ret
ENDPROC(cpu_switch_to)
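
/*
 * For reference: cpu_switch_to() is called from __switch_to() in
 * arch/arm64/kernel/process.c.  THREAD_CPU_CONTEXT is the asm-offsets value
 * for task_struct.thread.cpu_context, a struct cpu_context holding roughly
 * { x19..x28, fp, sp, pc } - exactly the registers stored/loaded above, with
 * lr landing in the pc slot so that ret resumes the next task where it last
 * called cpu_switch_to().
 */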

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
ret_fast_syscall:
        disable_irq                             // disable interrupts
        ldr     x1, [tsk, #TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, fast_work_pending
        enable_step_tsk x1, x2
        kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str     x0, [sp, #S_X0]                 // returned x0
work_pending:
        tbnz    x1, #TIF_NEED_RESCHED, work_resched
        /* TIF_SIGPENDING, TIF_NOTIFY_RESUME or TIF_FOREIGN_FPSTATE case */
        ldr     x2, [sp, #S_PSTATE]
        mov     x0, sp                          // 'regs'
        tst     x2, #PSR_MODE_MASK              // user mode regs?
        b.ne    no_work_pending                 // returning to kernel
        enable_irq                              // enable interrupts for do_notify_resume()
        bl      do_notify_resume
        b       ret_to_user
work_resched:
        bl      schedule

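/*
 * For reference: work_resched falls straight through into ret_to_user below,
 * so after schedule() (or do_notify_resume()) the TIF work flags are checked
 * again with interrupts disabled; the exit only happens once no work bits
 * remain set.
 */
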
/*
 * "slow" syscall return path.
 */
ret_to_user:
        disable_irq                             // disable interrupts
        ldr     x1, [tsk, #TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
        enable_step_tsk x1, x2
no_work_pending:
        kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
ENTRY(ret_from_fork)
        bl      schedule_tail
        cbz     x19, 1f                         // not a kernel thread
        mov     x0, x20
        blr     x19
1:      get_thread_info tsk
        b       ret_to_user
ENDPROC(ret_from_fork)
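
/*
 * For reference: copy_thread() in arch/arm64/kernel/process.c sets the new
 * task's saved pc to ret_from_fork.  For kernel threads it stashes the
 * thread function in x19 and its argument in x20 (hence the blr x19 above);
 * for user tasks x19 is zero and we drop straight into the normal
 * return-to-user path.
 */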

/*
 * SVC handler.
 */
        .align  6
el0_svc:
        adrp    stbl, sys_call_table            // load syscall table pointer
        uxtw    scno, w8                        // syscall number in w8
        mov     sc_nr, #__NR_syscalls
el0_svc_naked:                                  // compat entry point
        stp     x0, scno, [sp, #S_ORIG_X0]      // save the original x0 and syscall number
        enable_dbg_and_irq
        ct_user_exit 1

        ldr     x16, [tsk, #TI_FLAGS]           // check for syscall hooks
        tst     x16, #_TIF_SYSCALL_WORK
        b.ne    __sys_trace
        cmp     scno, sc_nr                     // check upper syscall limit
        b.hs    ni_sys
        ldr     x16, [stbl, scno, lsl #3]       // address in the syscall table
        blr     x16                             // call sys_* routine
        b       ret_fast_syscall
ni_sys:
        mov     x0, sp
        bl      do_ni_syscall
        b       ret_fast_syscall
ENDPROC(el0_svc)
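
/*
 * For reference: sys_call_table is an array of 64-bit function pointers, so
 * the table lookup above computes stbl + (scno << 3), i.e. entry scno of the
 * table; e.g. scno = 4 loads the pointer at byte offset 32.  The cmp/b.hs
 * against sc_nr rejects out-of-range numbers before the table is indexed.
 */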

        /*
         * This is the really slow path.  We're going to be doing context
         * switches, and waiting for our parent to respond.
         */
__sys_trace:
        mov     w0, #-1                         // set default errno for
        cmp     scno, x0                        // user-issued syscall(-1)
        b.ne    1f
        mov     x0, #-ENOSYS
        str     x0, [sp, #S_X0]
1:      mov     x0, sp
        bl      syscall_trace_enter
        cmp     w0, #-1                         // skip the syscall?
        b.eq    __sys_trace_return_skipped
        uxtw    scno, w0                        // syscall number (possibly new)
        mov     x1, sp                          // pointer to regs
        cmp     scno, sc_nr                     // check upper syscall limit
        b.hs    __ni_sys_trace
        ldp     x0, x1, [sp]                    // restore the syscall args
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        ldr     x16, [stbl, scno, lsl #3]       // address in the syscall table
        blr     x16                             // call sys_* routine

__sys_trace_return:
        str     x0, [sp, #S_X0]                 // save returned x0
__sys_trace_return_skipped:
        mov     x0, sp
        bl      syscall_trace_exit
        b       ret_to_user

__ni_sys_trace:
        mov     x0, sp
        bl      do_ni_syscall
        b       __sys_trace_return

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
        mov     x0, sp
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)
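
/*
 * For reference: the wrapper exists because the C implementation of
 * rt_sigreturn in arch/arm64/kernel/signal.c restores the user context from
 * the signal frame and therefore needs the pt_regs pointer as its argument,
 * which is why sp is moved into x0 before tail-calling it.
 */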