linux/arch/arm64/kernel/entry.S
/*
 * Low-level exception handling code
 *
 * Copyright (C) 2012 ARM Ltd.
 * Authors:     Catalin Marinas <catalin.marinas@arm.com>
 *              Will Deacon <will.deacon@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/init.h>
#include <linux/linkage.h>

#include <asm/assembler.h>
#include <asm/asm-offsets.h>
#include <asm/errno.h>
#include <asm/esr.h>
#include <asm/thread_info.h>
#include <asm/unistd.h>
#include <asm/unistd32.h>

/*
 * Bad Abort numbers
 *-----------------
 */
#define BAD_SYNC        0
#define BAD_IRQ         1
#define BAD_FIQ         2
#define BAD_ERROR       3

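/*
 * kernel_entry builds a struct pt_regs frame on the kernel stack: it saves
 * x0-x29 in pairs, then stores the exception-return state (LR, the aborted
 * SP, ELR_EL1 and SPSR_EL1) at the top of the frame.  For EL0 entries the
 * aborted SP is SP_EL0; for EL1 entries it is the kernel SP as it was
 * before this frame was allocated.
 */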
        .macro  kernel_entry, el, regsize = 64
        sub     sp, sp, #S_FRAME_SIZE - S_LR    // room for LR, SP, SPSR, ELR
        .if     \regsize == 32
        mov     w0, w0                          // zero upper 32 bits of x0
        .endif
        push    x28, x29
        push    x26, x27
        push    x24, x25
        push    x22, x23
        push    x20, x21
        push    x18, x19
        push    x16, x17
        push    x14, x15
        push    x12, x13
        push    x10, x11
        push    x8, x9
        push    x6, x7
        push    x4, x5
        push    x2, x3
        push    x0, x1
        .if     \el == 0
        mrs     x21, sp_el0
        .else
        add     x21, sp, #S_FRAME_SIZE
        .endif
        mrs     x22, elr_el1
        mrs     x23, spsr_el1
        stp     lr, x21, [sp, #S_LR]
        stp     x22, x23, [sp, #S_PC]

        /*
         * Set syscallno to -1 by default (overridden later if real syscall).
         */
        .if     \el == 0
        mvn     x21, xzr
        str     x21, [sp, #S_SYSCALLNO]
        .endif

        /*
         * Registers that may be useful after this macro is invoked:
         *
         * x21 - aborted SP
         * x22 - aborted PC
         * x23 - aborted PSTATE
         */
        .endm

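/*
 * kernel_exit unwinds the pt_regs frame built by kernel_entry and returns
 * from the exception: ELR_EL1 and SPSR_EL1 are reprogrammed from the saved
 * values before the final eret, which atomically restores the PC and
 * PSTATE of the interrupted context.  With \ret = 1 the x0/x1 slots are
 * skipped so that a live x0 can carry the syscall return value back.
 */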
        .macro  kernel_exit, el, ret = 0
        ldp     x21, x22, [sp, #S_PC]           // load ELR, SPSR
        .if     \el == 0
        ldr     x23, [sp, #S_SP]                // load return stack pointer
        .endif
        .if     \ret
        ldr     x1, [sp, #S_X1]                 // preserve x0 (syscall return)
        add     sp, sp, S_X2
        .else
        pop     x0, x1
        .endif
        pop     x2, x3                          // load the rest of the registers
        pop     x4, x5
        pop     x6, x7
        pop     x8, x9
        msr     elr_el1, x21                    // set up the return data
        msr     spsr_el1, x22
        .if     \el == 0
        msr     sp_el0, x23
        .endif
        pop     x10, x11
        pop     x12, x13
        pop     x14, x15
        pop     x16, x17
        pop     x18, x19
        pop     x20, x21
        pop     x22, x23
        pop     x24, x25
        pop     x26, x27
        pop     x28, x29
        ldr     lr, [sp], #S_FRAME_SIZE - S_LR  // load LR and restore SP
        eret                                    // return to EL1 or EL0
        .endm

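/*
 * thread_info sits at the bottom of the kernel stack, so clearing the low
 * THREAD_SIZE bits of the current stack pointer yields the current
 * thread_info pointer.  This relies on kernel stacks being THREAD_SIZE
 * aligned.
 */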
        .macro  get_thread_info, rd
        mov     \rd, sp
        and     \rd, \rd, #~(THREAD_SIZE - 1)   // top of stack
        .endm

/*
 * These are the registers used in the syscall handler, and allow us to
 * have in theory up to 7 arguments to a function - x0 to x6.
 *
 * x7 is reserved for the system call number in 32-bit mode.
 */
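/*
 * x25-x28 are callee-saved in the AArch64 procedure call standard, so the
 * aliases below survive the C calls made while a syscall is handled.
 */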
sc_nr   .req    x25             // number of system calls
scno    .req    x26             // syscall number
stbl    .req    x27             // syscall table pointer
tsk     .req    x28             // current thread_info

/*
 * Interrupt handling.
 */
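/*
 * handle_arch_irq (a code pointer defined at the end of this file) is
 * expected to be installed at boot by the interrupt controller code; the
 * macro calls it through x1 with the pt_regs pointer in x0.
 */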
        .macro  irq_handler
        ldr     x1, handle_arch_irq
        mov     x0, sp
        blr     x1
        .endm

        .text

/*
 * Exception vectors.
 */
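/*
 * VBAR_EL1 requires 2KB alignment, hence the .align 11.  The table has
 * four groups of four entries: current EL with SP_EL0, current EL with
 * SP_ELx, lower EL using AArch64 and lower EL using AArch32, each group
 * covering the Synchronous/IRQ/FIQ/Error exception types in turn.  Each
 * ventry slot is 128 bytes wide.
 */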

        .align  11
ENTRY(vectors)
        ventry  el1_sync_invalid                // Synchronous EL1t
        ventry  el1_irq_invalid                 // IRQ EL1t
        ventry  el1_fiq_invalid                 // FIQ EL1t
        ventry  el1_error_invalid               // Error EL1t

        ventry  el1_sync                        // Synchronous EL1h
        ventry  el1_irq                         // IRQ EL1h
        ventry  el1_fiq_invalid                 // FIQ EL1h
        ventry  el1_error_invalid               // Error EL1h

        ventry  el0_sync                        // Synchronous 64-bit EL0
        ventry  el0_irq                         // IRQ 64-bit EL0
        ventry  el0_fiq_invalid                 // FIQ 64-bit EL0
        ventry  el0_error_invalid               // Error 64-bit EL0

#ifdef CONFIG_COMPAT
        ventry  el0_sync_compat                 // Synchronous 32-bit EL0
        ventry  el0_irq_compat                  // IRQ 32-bit EL0
        ventry  el0_fiq_invalid_compat          // FIQ 32-bit EL0
        ventry  el0_error_invalid_compat        // Error 32-bit EL0
#else
        ventry  el0_sync_invalid                // Synchronous 32-bit EL0
        ventry  el0_irq_invalid                 // IRQ 32-bit EL0
        ventry  el0_fiq_invalid                 // FIQ 32-bit EL0
        ventry  el0_error_invalid               // Error 32-bit EL0
#endif
END(vectors)

/*
 * Invalid mode handlers
 */
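/*
 * inv_entry saves the register state and calls bad_mode() with the
 * pt_regs pointer, one of the BAD_* reason codes defined above and the
 * syndrome value, so unexpected exceptions die with a diagnostic rather
 * than silently.
 */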
        .macro  inv_entry, el, reason, regsize = 64
        kernel_entry el, \regsize
        mov     x0, sp
        mov     x1, #\reason
        mrs     x2, esr_el1
        b       bad_mode
        .endm

el0_sync_invalid:
        inv_entry 0, BAD_SYNC
ENDPROC(el0_sync_invalid)

el0_irq_invalid:
        inv_entry 0, BAD_IRQ
ENDPROC(el0_irq_invalid)

el0_fiq_invalid:
        inv_entry 0, BAD_FIQ
ENDPROC(el0_fiq_invalid)

el0_error_invalid:
        inv_entry 0, BAD_ERROR
ENDPROC(el0_error_invalid)

#ifdef CONFIG_COMPAT
el0_fiq_invalid_compat:
        inv_entry 0, BAD_FIQ, 32
ENDPROC(el0_fiq_invalid_compat)

el0_error_invalid_compat:
        inv_entry 0, BAD_ERROR, 32
ENDPROC(el0_error_invalid_compat)
#endif

el1_sync_invalid:
        inv_entry 1, BAD_SYNC
ENDPROC(el1_sync_invalid)

el1_irq_invalid:
        inv_entry 1, BAD_IRQ
ENDPROC(el1_irq_invalid)

el1_fiq_invalid:
        inv_entry 1, BAD_FIQ
ENDPROC(el1_fiq_invalid)

el1_error_invalid:
        inv_entry 1, BAD_ERROR
ENDPROC(el1_error_invalid)

/*
 * EL1 mode handlers.
 */
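/*
 * Synchronous exceptions are dispatched on the exception class (EC)
 * field, bits [31:26] of ESR_EL1.  The trailing b.ge works because the
 * debug-related classes occupy the highest EC encodings.
 */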
        .align  6
el1_sync:
        kernel_entry 1
        mrs     x1, esr_el1                     // read the syndrome register
        lsr     x24, x1, #ESR_EL1_EC_SHIFT      // exception class
        cmp     x24, #ESR_EL1_EC_DABT_EL1       // data abort in EL1
        b.eq    el1_da
        cmp     x24, #ESR_EL1_EC_SYS64          // configurable trap
        b.eq    el1_undef
        cmp     x24, #ESR_EL1_EC_SP_ALIGN       // stack alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_EL1_EC_PC_ALIGN       // pc alignment exception
        b.eq    el1_sp_pc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL1
        b.eq    el1_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL1    // debug exception in EL1
        b.ge    el1_dbg
        b       el1_inv
el1_da:
        /*
         * Data abort handling
         */
        mrs     x0, far_el1
        enable_dbg_if_not_stepping x2
        // re-enable interrupts if they were enabled in the aborted context
        tbnz    x23, #7, 1f                     // PSR_I_BIT
        enable_irq
1:
        mov     x2, sp                          // struct pt_regs
        bl      do_mem_abort

        // disable interrupts before pulling preserved data off the stack
        disable_irq
        kernel_exit 1
el1_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x0, far_el1
        mov     x1, x25
        mov     x2, sp
        b       do_sp_pc_abort
el1_undef:
        /*
         * Undefined instruction
         */
        mov     x0, sp
        b       do_undefinstr
el1_dbg:
        /*
         * Debug exception handling
         */
        tbz     x24, #0, el1_inv                // EL1 only
        mrs     x0, far_el1
        mov     x2, sp                          // struct pt_regs
        bl      do_debug_exception

        kernel_exit 1
el1_inv:
        // TODO: add support for undefined instructions in kernel mode
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mrs     x2, esr_el1
        b       bad_mode
ENDPROC(el1_sync)

        .align  6
el1_irq:
        kernel_entry 1
        enable_dbg_if_not_stepping x0
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
#ifdef CONFIG_PREEMPT
        get_thread_info tsk
        ldr     x24, [tsk, #TI_PREEMPT]         // get preempt count
        add     x0, x24, #1                     // increment it
        str     x0, [tsk, #TI_PREEMPT]
#endif
        irq_handler
#ifdef CONFIG_PREEMPT
        str     x24, [tsk, #TI_PREEMPT]         // restore preempt count
        cbnz    x24, 1f                         // preempt count != 0
        ldr     x0, [tsk, #TI_FLAGS]            // get flags
        tbz     x0, #TIF_NEED_RESCHED, 1f       // needs rescheduling?
        bl      el1_preempt
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        kernel_exit 1
ENDPROC(el1_irq)

#ifdef CONFIG_PREEMPT
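/*
 * Called with interrupts disabled from the el1_irq exit path.  LR is
 * stashed in x24 because the blr to preempt_schedule_irq clobbers it, and
 * the loop repeats until TIF_NEED_RESCHED is clear.
 */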
el1_preempt:
        mov     x24, lr
1:      enable_dbg
        bl      preempt_schedule_irq            // irq en/disable is done inside
        ldr     x0, [tsk, #TI_FLAGS]            // get new task's TI_FLAGS
        tbnz    x0, #TIF_NEED_RESCHED, 1b       // needs rescheduling?
        ret     x24
#endif

/*
 * EL0 mode handlers.
 */
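/*
 * el0_sync and el0_sync_compat dispatch on the same EC field; the compat
 * path additionally routes the CP15/CP14 coprocessor traps to el0_undef.
 * The full syndrome is kept in x25 for the individual handlers.
 */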
        .align  6
el0_sync:
        kernel_entry 0
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_EL1_EC_SHIFT     // exception class
        cmp     x24, #ESR_EL1_EC_SVC64          // SVC in 64-bit state
        b.eq    el0_svc
        adr     lr, ret_from_exception
        cmp     x24, #ESR_EL1_EC_DABT_EL0       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_EL1_EC_IABT_EL0       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_EL1_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_EL1_EC_FP_EXC64       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_EL1_EC_SYS64          // configurable trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_SP_ALIGN       // stack alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_EL1_EC_PC_ALIGN       // pc alignment exception
        b.eq    el0_sp_pc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL0    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv

#ifdef CONFIG_COMPAT
        .align  6
el0_sync_compat:
        kernel_entry 0, 32
        mrs     x25, esr_el1                    // read the syndrome register
        lsr     x24, x25, #ESR_EL1_EC_SHIFT     // exception class
        cmp     x24, #ESR_EL1_EC_SVC32          // SVC in 32-bit state
        b.eq    el0_svc_compat
        adr     lr, ret_from_exception
        cmp     x24, #ESR_EL1_EC_DABT_EL0       // data abort in EL0
        b.eq    el0_da
        cmp     x24, #ESR_EL1_EC_IABT_EL0       // instruction abort in EL0
        b.eq    el0_ia
        cmp     x24, #ESR_EL1_EC_FP_ASIMD       // FP/ASIMD access
        b.eq    el0_fpsimd_acc
        cmp     x24, #ESR_EL1_EC_FP_EXC32       // FP/ASIMD exception
        b.eq    el0_fpsimd_exc
        cmp     x24, #ESR_EL1_EC_UNKNOWN        // unknown exception in EL0
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP15_32        // CP15 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP15_64        // CP15 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP14_MR        // CP14 MRC/MCR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP14_LS        // CP14 LDC/STC trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_CP14_64        // CP14 MRRC/MCRR trap
        b.eq    el0_undef
        cmp     x24, #ESR_EL1_EC_BREAKPT_EL0    // debug exception in EL0
        b.ge    el0_dbg
        b       el0_inv
el0_svc_compat:
        /*
         * AArch32 syscall handling
         */
        adr     stbl, compat_sys_call_table     // load compat syscall table pointer
        uxtw    scno, w7                        // syscall number in w7 (r7)
        mov     sc_nr, #__NR_compat_syscalls
        b       el0_svc_naked

        .align  6
el0_irq_compat:
        kernel_entry 0, 32
        b       el0_irq_naked
#endif

el0_da:
        /*
         * Data abort handling
         */
        mrs     x0, far_el1
        bic     x0, x0, #(0xff << 56)
        disable_step x1
        isb
        enable_dbg
        // enable interrupts before calling the main handler
        enable_irq
        mov     x1, x25
        mov     x2, sp
        b       do_mem_abort
el0_ia:
        /*
         * Instruction abort handling
         */
        mrs     x0, far_el1
        disable_step x1
        isb
        enable_dbg
        // enable interrupts before calling the main handler
        enable_irq
        orr     x1, x25, #1 << 24               // use reserved ISS bit for instruction aborts
        mov     x2, sp
        b       do_mem_abort
el0_fpsimd_acc:
        /*
         * Floating Point or Advanced SIMD access
         */
        mov     x0, x25
        mov     x1, sp
        b       do_fpsimd_acc
el0_fpsimd_exc:
        /*
         * Floating Point or Advanced SIMD exception
         */
        mov     x0, x25
        mov     x1, sp
        b       do_fpsimd_exc
el0_sp_pc:
        /*
         * Stack or PC alignment exception handling
         */
        mrs     x0, far_el1
        disable_step x1
        isb
        enable_dbg
        // enable interrupts before calling the main handler
        enable_irq
        mov     x1, x25
        mov     x2, sp
        b       do_sp_pc_abort
el0_undef:
        /*
         * Undefined instruction
         */
        mov     x0, sp
        // enable interrupts before calling the main handler
        enable_irq
        b       do_undefinstr
el0_dbg:
        /*
         * Debug exception handling
         */
        tbnz    x24, #0, el0_inv                // EL0 only
        mrs     x0, far_el1
        disable_step x1
        mov     x1, x25
        mov     x2, sp
        b       do_debug_exception
el0_inv:
        mov     x0, sp
        mov     x1, #BAD_SYNC
        mrs     x2, esr_el1
        b       bad_mode
ENDPROC(el0_sync)

        .align  6
el0_irq:
        kernel_entry 0
el0_irq_naked:
        disable_step x1
        isb
        enable_dbg
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_off
#endif
        get_thread_info tsk
#ifdef CONFIG_PREEMPT
        ldr     x24, [tsk, #TI_PREEMPT]         // get preempt count
        add     x23, x24, #1                    // increment it
        str     x23, [tsk, #TI_PREEMPT]
#endif
        irq_handler
#ifdef CONFIG_PREEMPT
        ldr     x0, [tsk, #TI_PREEMPT]
        str     x24, [tsk, #TI_PREEMPT]
        cmp     x0, x23
        b.eq    1f
        mov     x1, #0
        str     x1, [x1]                        // BUG
1:
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
#endif
        b       ret_to_user
ENDPROC(el0_irq)

/*
 * This is the return code to user mode for abort handlers
 */
ret_from_exception:
        get_thread_info tsk
        b       ret_to_user
ENDPROC(ret_from_exception)

/*
 * Register switch for AArch64. The callee-saved registers need to be saved
 * and restored. On entry:
 *   x0 = previous task_struct (must be preserved across the switch)
 *   x1 = next task_struct
 * Previous and next are guaranteed not to be the same.
 */
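/*
 * The stp/ldp sequence below mirrors the cpu_context layout in
 * <asm/processor.h>, roughly (a sketch; field order as assumed here):
 *
 *      struct cpu_context {
 *              unsigned long x19, x20, x21, x22, x23, x24;
 *              unsigned long x25, x26, x27, x28;
 *              unsigned long fp, sp, pc;
 *      };
 *
 * THREAD_CPU_CONTEXT is the asm-offsets offset of this structure within
 * task_struct.
 */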
ENTRY(cpu_switch_to)
        add     x8, x0, #THREAD_CPU_CONTEXT
        mov     x9, sp
        stp     x19, x20, [x8], #16             // store callee-saved registers
        stp     x21, x22, [x8], #16
        stp     x23, x24, [x8], #16
        stp     x25, x26, [x8], #16
        stp     x27, x28, [x8], #16
        stp     x29, x9, [x8], #16
        str     lr, [x8]
        add     x8, x1, #THREAD_CPU_CONTEXT
        ldp     x19, x20, [x8], #16             // restore callee-saved registers
        ldp     x21, x22, [x8], #16
        ldp     x23, x24, [x8], #16
        ldp     x25, x26, [x8], #16
        ldp     x27, x28, [x8], #16
        ldp     x29, x9, [x8], #16
        ldr     lr, [x8]
        mov     sp, x9
        ret
ENDPROC(cpu_switch_to)

/*
 * This is the fast syscall return path.  We do as little as possible here,
 * and this includes saving x0 back into the kernel stack.
 */
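// "As little as possible": one TI_FLAGS check with interrupts disabled,
// then kernel_exit with ret = 1, which never writes the live x0 (the
// syscall return value) back to the pt_regs frame on the stack.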
ret_fast_syscall:
        disable_irq                             // disable interrupts
        ldr     x1, [tsk, #TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, fast_work_pending
        tbz     x1, #TIF_SINGLESTEP, fast_exit
        disable_dbg
        enable_step x2
fast_exit:
        kernel_exit 0, ret = 1

/*
 * Ok, we need to do extra processing, enter the slow path.
 */
fast_work_pending:
        str     x0, [sp, #S_X0]                 // returned x0
work_pending:
        tbnz    x1, #TIF_NEED_RESCHED, work_resched
        /* TIF_SIGPENDING or TIF_NOTIFY_RESUME case */
        ldr     x2, [sp, #S_PSTATE]
        mov     x0, sp                          // 'regs'
        tst     x2, #PSR_MODE_MASK              // user mode regs?
        b.ne    no_work_pending                 // returning to kernel
        enable_irq                              // enable interrupts for do_notify_resume()
        bl      do_notify_resume
        b       ret_to_user
work_resched:
        enable_dbg
        bl      schedule

/*
 * "slow" syscall return path.
 */
ret_to_user:
        disable_irq                             // disable interrupts
        ldr     x1, [tsk, #TI_FLAGS]
        and     x2, x1, #_TIF_WORK_MASK
        cbnz    x2, work_pending
        tbz     x1, #TIF_SINGLESTEP, no_work_pending
        disable_dbg
        enable_step x2
no_work_pending:
        kernel_exit 0, ret = 0
ENDPROC(ret_to_user)

/*
 * This is how we return from a fork.
 */
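/*
 * For kernel threads, copy_thread() stashes the thread function in x19
 * and its argument in x20; x19 == 0 means we are returning to a user
 * task instead.
 */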
ENTRY(ret_from_fork)
        bl      schedule_tail
        cbz     x19, 1f                         // not a kernel thread
        mov     x0, x20
        blr     x19
1:      get_thread_info tsk
        b       ret_to_user
ENDPROC(ret_from_fork)

/*
 * SVC handler.
 */
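/*
 * The syscall number arrives in w8 (AArch64) or w7 (AArch32 compat) and
 * is bounds-checked against sc_nr before indexing the table; each entry
 * is an 8-byte function pointer, hence the scaled "lsl #3" load below.
 */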
        .align  6
el0_svc:
        adr     stbl, sys_call_table            // load syscall table pointer
        uxtw    scno, w8                        // syscall number in w8
        mov     sc_nr, #__NR_syscalls
el0_svc_naked:                                  // compat entry point
        stp     x0, scno, [sp, #S_ORIG_X0]      // save the original x0 and syscall number
        disable_step x16
        isb
        enable_dbg
        enable_irq

        get_thread_info tsk
        ldr     x16, [tsk, #TI_FLAGS]           // check for syscall tracing
        tbnz    x16, #TIF_SYSCALL_TRACE, __sys_trace // are we tracing syscalls?
        adr     lr, ret_fast_syscall            // return address
        cmp     scno, sc_nr                     // check upper syscall limit
        b.hs    ni_sys
        ldr     x16, [stbl, scno, lsl #3]       // address in the syscall table
        br      x16                             // call sys_* routine
ni_sys:
        mov     x0, sp
        b       do_ni_syscall
ENDPROC(el0_svc)

        /*
         * This is the really slow path.  We're going to be doing context
         * switches, and waiting for our parent to respond.
         */
__sys_trace:
        mov     x1, sp
        mov     w0, #0                          // trace entry
        bl      syscall_trace
        adr     lr, __sys_trace_return          // return address
        uxtw    scno, w0                        // syscall number (possibly new)
        mov     x1, sp                          // pointer to regs
        cmp     scno, sc_nr                     // check upper syscall limit
        b.hs    ni_sys
        ldp     x0, x1, [sp]                    // restore the syscall args
        ldp     x2, x3, [sp, #S_X2]
        ldp     x4, x5, [sp, #S_X4]
        ldp     x6, x7, [sp, #S_X6]
        ldr     x16, [stbl, scno, lsl #3]       // address in the syscall table
        br      x16                             // call sys_* routine

__sys_trace_return:
        str     x0, [sp]                        // save returned x0
        mov     x1, sp
        mov     w0, #1                          // trace exit
        bl      syscall_trace
        b       ret_to_user

/*
 * Special system call wrappers.
 */
ENTRY(sys_rt_sigreturn_wrapper)
        mov     x0, sp
        b       sys_rt_sigreturn
ENDPROC(sys_rt_sigreturn_wrapper)

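/*
 * Storage for the IRQ dispatch pointer used by the irq_handler macro.
 * It starts out as zero and is expected to be filled in at boot by the
 * interrupt controller initialisation code.
 */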
ENTRY(handle_arch_irq)
        .quad   0