linux/arch/s390/kernel/entry.S
/* SPDX-License-Identifier: GPL-2.0 */
/*
 *    S390 low-level entry points.
 *
 *    Copyright IBM Corp. 1999, 2012
 *    Author(s): Martin Schwidefsky (schwidefsky@de.ibm.com),
 *               Hartmut Penner (hp@de.ibm.com),
 *               Denis Joseph Barrow (djbarrow@de.ibm.com,barrow_dj@yahoo.com),
 *               Heiko Carstens <heiko.carstens@de.ibm.com>
 */

#include <linux/init.h>
#include <linux/linkage.h>
#include <asm/alternative-asm.h>
#include <asm/processor.h>
#include <asm/cache.h>
#include <asm/dwarf.h>
#include <asm/errno.h>
#include <asm/ptrace.h>
#include <asm/thread_info.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/page.h>
#include <asm/sigp.h>
#include <asm/irq.h>
#include <asm/vx-insn.h>
#include <asm/setup.h>
#include <asm/nmi.h>
#include <asm/export.h>
#include <asm/nospec-insn.h>

__PT_R0      =  __PT_GPRS
__PT_R1      =  __PT_GPRS + 8
__PT_R2      =  __PT_GPRS + 16
__PT_R3      =  __PT_GPRS + 24
__PT_R4      =  __PT_GPRS + 32
__PT_R5      =  __PT_GPRS + 40
__PT_R6      =  __PT_GPRS + 48
__PT_R7      =  __PT_GPRS + 56
__PT_R8      =  __PT_GPRS + 64
__PT_R9      =  __PT_GPRS + 72
__PT_R10     =  __PT_GPRS + 80
__PT_R11     =  __PT_GPRS + 88
__PT_R12     =  __PT_GPRS + 96
__PT_R13     =  __PT_GPRS + 104
__PT_R14     =  __PT_GPRS + 112
__PT_R15     =  __PT_GPRS + 120

STACK_SHIFT = PAGE_SHIFT + THREAD_SIZE_ORDER
STACK_SIZE  = 1 << STACK_SHIFT
STACK_INIT = STACK_SIZE - STACK_FRAME_OVERHEAD - __PT_SIZE

_LPP_OFFSET     = __LC_LPP

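        /*
         * Stack overflow check: if the low bits of %r15 selected by
         * STACK_SIZE - CONFIG_STACK_GUARD are all zero, the stack pointer
         * sits in the guard area at the low end of the stack and we branch
         * to stack_overflow. The lowcore save area offset is passed in %r14
         * so stack_overflow can pick up the registers saved there.
         */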
        .macro  CHECK_STACK savearea
#ifdef CONFIG_CHECK_STACK
        tml     %r15,STACK_SIZE - CONFIG_STACK_GUARD
        lghi    %r14,\savearea
        jz      stack_overflow
#endif
        .endm

        .macro  CHECK_VMAP_STACK savearea,oklabel
#ifdef CONFIG_VMAP_STACK
        lgr     %r14,%r15
        nill    %r14,0x10000 - STACK_SIZE
        oill    %r14,STACK_INIT
        clg     %r14,__LC_KERNEL_STACK
        je      \oklabel
        clg     %r14,__LC_ASYNC_STACK
        je      \oklabel
        clg     %r14,__LC_MCCK_STACK
        je      \oklabel
        clg     %r14,__LC_NODAT_STACK
        je      \oklabel
        clg     %r14,__LC_RESTART_STACK
        je      \oklabel
        lghi    %r14,\savearea
        j       stack_overflow
#else
        j       \oklabel
#endif
        .endm

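        /*
         * STCK stores the TOD clock at \savearea. The ALTERNATIVE emits
         * STORE CLOCK (0xb205) by default and STORE CLOCK FAST (0xb27c)
         * when facility 25 (store-clock-fast) is installed; used below
         * e.g. as "STCK __LC_INT_CLOCK".
         */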
        .macro STCK savearea
        ALTERNATIVE ".insn      s,0xb2050000,\savearea", \
                    ".insn      s,0xb27c0000,\savearea", 25
        .endm

        /*
         * The TSTMSK macro generates a test-under-mask instruction by
         * calculating the memory offset for the specified mask value.
         * Mask value can be any constant.  The macro shifts the mask
         * value to calculate the memory offset for the test-under-mask
         * instruction.
         */
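        /*
         * For illustration (assuming the default size of 8):
         *      TSTMSK  __LC_CPU_FLAGS,0x0100
         * recurses once with mask >> 8 and bytepos + 1, and finally emits
         *      tm      6+__LC_CPU_FLAGS,0x01
         * i.e. it tests the byte of the 8-byte field that contains the
         * requested bit.
         */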
        .macro TSTMSK addr, mask, size=8, bytepos=0
                .if (\bytepos < \size) && (\mask >> 8)
                        .if (\mask & 0xff)
                                .error "Mask exceeds byte boundary"
                        .endif
                        TSTMSK \addr, "(\mask >> 8)", \size, "(\bytepos + 1)"
                        .exitm
                .endif
                .ifeq \mask
                        .error "Mask must not be zero"
                .endif
                off = \size - \bytepos - 1
                tm      off+\addr, \mask
        .endm

        .macro BPOFF
        ALTERNATIVE "", ".long 0xb2e8c000", 82
        .endm

        .macro BPON
        ALTERNATIVE "", ".long 0xb2e8d000", 82
        .endm

        .macro BPENTER tif_ptr,tif_mask
        ALTERNATIVE "TSTMSK \tif_ptr,\tif_mask; jz .+8; .long 0xb2e8d000", \
                    "", 82
        .endm

        .macro BPEXIT tif_ptr,tif_mask
        TSTMSK  \tif_ptr,\tif_mask
        ALTERNATIVE "jz .+8;  .long 0xb2e8c000", \
                    "jnz .+8; .long 0xb2e8d000", 82
        .endm

        /*
         * The CHKSTG macro jumps to the provided label in case the
         * machine check interruption code reports one of the
         * unrecoverable storage errors:
         * - Storage error uncorrected
         * - Storage key error uncorrected
         * - Storage degradation with Failing-storage-address validity
         */
        .macro CHKSTG errlabel
        TSTMSK  __LC_MCCK_CODE,(MCCK_CODE_STG_ERROR|MCCK_CODE_STG_KEY_ERROR)
        jnz     \errlabel
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_STG_DEGRAD
        jz      .Loklabel\@
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_STG_FAIL_ADDR
        jnz     \errlabel
.Loklabel\@:
        .endm

#if IS_ENABLED(CONFIG_KVM)
        /*
         * The OUTSIDE macro jumps to the provided label in case the value
         * in the provided register is outside of the provided range. The
         * macro is useful for checking whether a PSW stored in a register
         * pair points inside or outside of a block of instructions.
         * @reg: register to check
         * @start: start of the range
         * @end: end of the range
         * @outside_label: jump here if @reg is outside of [@start..@end)
         */
        .macro OUTSIDE reg,start,end,outside_label
        lgr     %r14,\reg
        larl    %r13,\start
        slgr    %r14,%r13
        lghi    %r13,\end - \start
        clgr    %r14,%r13
        jhe     \outside_label
        .endm

        .macro SIEEXIT
        lg      %r9,__SF_SIE_CONTROL(%r15)      # get control block pointer
        ni      __SIE_PROG0C+3(%r9),0xfe        # no longer in SIE
        lctlg   %c1,%c1,__LC_KERNEL_ASCE        # load primary asce
        larl    %r9,sie_exit                    # skip forward to sie_exit
        .endm
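        /*
         * Typical use, as in the interrupt handlers below:
         *      OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
         * branches to 0f when the old PSW address in %r9 does not point
         * into the SIE critical section; otherwise SIEEXIT is used to
         * leave SIE before the event is handled on the kernel side.
         */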
#endif

        GEN_BR_THUNK %r14
        GEN_BR_THUNK %r14,%r13

        .section .kprobes.text, "ax"
.Ldummy:
        /*
         * This nop exists only to avoid having __bpon start at the
         * beginning of the kprobes text section. In that case there would
         * be several symbols at the same address and e.g. objdump would
         * pick an arbitrary symbol name when disassembling this code.
         * With the added nop in between, the __bpon symbol is unique
         * again.
         */
        nop     0

ENTRY(__bpon)
        .globl __bpon
        BPON
        BR_EX   %r14
ENDPROC(__bpon)

/*
 * Scheduler resume function, called by switch_to
 *  gpr2 = (task_struct *) prev
 *  gpr3 = (task_struct *) next
 * Returns:
 *  gpr2 = prev
 */
ENTRY(__switch_to)
        stmg    %r6,%r15,__SF_GPRS(%r15)        # store gprs of prev task
        lghi    %r4,__TASK_stack                # offset of task_struct->stack
        lghi    %r1,__TASK_thread               # offset of task_struct->thread
        llill   %r5,STACK_INIT                  # load STACK_INIT offset
        stg     %r15,__THREAD_ksp(%r1,%r2)      # store kernel stack of prev
        lg      %r15,0(%r4,%r3)                 # start of kernel stack of next
        agr     %r15,%r5                        # end of kernel stack of next
        stg     %r3,__LC_CURRENT                # store task struct of next
        stg     %r15,__LC_KERNEL_STACK          # store end of kernel stack
        lg      %r15,__THREAD_ksp(%r1,%r3)      # load kernel stack of next
        aghi    %r3,__TASK_pid
        mvc     __LC_CURRENT_PID(4,%r0),0(%r3)  # store pid of next
        lmg     %r6,%r15,__SF_GPRS(%r15)        # load gprs of next task
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        BR_EX   %r14
ENDPROC(__switch_to)

#if IS_ENABLED(CONFIG_KVM)
/*
 * sie64a calling convention:
 * %r2 pointer to sie control block
 * %r3 guest register save area
 */
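/*
 * Rough flow: the kernel registers are saved in the caller's stack frame,
 * guest gprs 0-13 are loaded from the save area at %r3, the SIE instruction
 * runs the guest, and on exit the guest gprs are stored back and the reason
 * code from __SF_SIE_REASON is returned in %r2.
 */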
ENTRY(sie64a)
        stmg    %r6,%r14,__SF_GPRS(%r15)        # save kernel registers
        lg      %r12,__LC_CURRENT
        stg     %r2,__SF_SIE_CONTROL(%r15)      # save control block pointer
        stg     %r3,__SF_SIE_SAVEAREA(%r15)     # save guest register save area
        xc      __SF_SIE_REASON(8,%r15),__SF_SIE_REASON(%r15) # reason code = 0
        mvc     __SF_SIE_FLAGS(8,%r15),__TI_flags(%r12) # copy thread flags
        lmg     %r0,%r13,0(%r3)                 # load guest gprs 0-13
        lg      %r14,__LC_GMAP                  # get gmap pointer
        ltgr    %r14,%r14
        jz      .Lsie_gmap
        lctlg   %c1,%c1,__GMAP_ASCE(%r14)       # load primary asce
.Lsie_gmap:
        lg      %r14,__SF_SIE_CONTROL(%r15)     # get control block pointer
        oi      __SIE_PROG0C+3(%r14),1          # we are going into SIE now
        tm      __SIE_PROG20+3(%r14),3          # last exit...
        jnz     .Lsie_skip
        TSTMSK  __LC_CPU_FLAGS,_CIF_FPU
        jo      .Lsie_skip                      # exit if fp/vx regs changed
        BPEXIT  __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_entry:
        sie     0(%r14)
        BPOFF
        BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
.Lsie_skip:
        ni      __SIE_PROG0C+3(%r14),0xfe       # no longer in SIE
        lctlg   %c1,%c1,__LC_KERNEL_ASCE        # load primary asce
.Lsie_done:
# Some program checks are suppressing. C code (e.g. do_protection_exception)
# will rewind the PSW by the ILC, which is often 4 bytes in case of SIE. There
# are some corner cases (e.g. runtime instrumentation) where ILC is unpredictable.
# Other instructions between sie64a and .Lsie_done should not cause program
# interrupts. So let's use 3 nops as a landing pad for all possible rewinds.
.Lrewind_pad6:
        nopr    7
.Lrewind_pad4:
        nopr    7
.Lrewind_pad2:
        nopr    7
        .globl sie_exit
sie_exit:
        lg      %r14,__SF_SIE_SAVEAREA(%r15)    # load guest register save area
        stmg    %r0,%r13,0(%r14)                # save guest gprs 0-13
        xgr     %r0,%r0                         # clear guest registers to
        xgr     %r1,%r1                         # prevent speculative use
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        lmg     %r6,%r14,__SF_GPRS(%r15)        # restore kernel registers
        lg      %r2,__SF_SIE_REASON(%r15)       # return exit reason code
        BR_EX   %r14
.Lsie_fault:
        lghi    %r14,-EFAULT
        stg     %r14,__SF_SIE_REASON(%r15)      # set exit reason code
        j       sie_exit

        EX_TABLE(.Lrewind_pad6,.Lsie_fault)
        EX_TABLE(.Lrewind_pad4,.Lsie_fault)
        EX_TABLE(.Lrewind_pad2,.Lsie_fault)
        EX_TABLE(sie_exit,.Lsie_fault)
ENDPROC(sie64a)
EXPORT_SYMBOL(sie64a)
EXPORT_SYMBOL(sie_exit)
#endif

/*
 * SVC interrupt handler routine. System calls are synchronous events and
 * are entered with interrupts disabled.
 */

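/*
 * %r14 is used as a PER flag here: the normal svc entry clears it, while the
 * single-stepped svc path (.Lpgm_svcper below) sets it to 1 before branching
 * to .Lsysc_per; the flag is passed on to __do_syscall in %r3.
 */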
ENTRY(system_call)
        stpt    __LC_SYS_ENTER_TIMER
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        BPOFF
        lghi    %r14,0
.Lsysc_per:
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        lg      %r12,__LC_CURRENT
        lg      %r15,__LC_KERNEL_STACK
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        stmg    %r0,%r7,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r8,%r8
        xgr     %r9,%r9
        xgr     %r10,%r10
        xgr     %r11,%r11
        la      %r2,STACK_FRAME_OVERHEAD(%r15)  # pointer to pt_regs
        mvc     __PT_R8(64,%r2),__LC_SAVE_AREA_SYNC
        lgr     %r3,%r14
        brasl   %r14,__do_syscall
        lctlg   %c1,%c1,__LC_USER_ASCE
        mvc     __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
        BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
        lmg     %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        stpt    __LC_EXIT_TIMER
        b       __LC_RETURN_LPSWE
ENDPROC(system_call)

#
# a new process exits the kernel with ret_from_fork
#
ENTRY(ret_from_fork)
        lgr     %r3,%r11
        brasl   %r14,__ret_from_fork
        lctlg   %c1,%c1,__LC_USER_ASCE
        mvc     __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
        BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
        lmg     %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        stpt    __LC_EXIT_TIMER
        b       __LC_RETURN_LPSWE
ENDPROC(ret_from_fork)

/*
 * Program check handler routine
 */

ENTRY(pgm_check_handler)
        stpt    __LC_SYS_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_SYNC
        lg      %r12,__LC_CURRENT
        lghi    %r10,0
        lmg     %r8,%r9,__LC_PGM_OLD_PSW
        tmhh    %r8,0x0001              # coming from user space?
        jno     .Lpgm_skip_asce
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        j       3f                      # -> fault in user space
.Lpgm_skip_asce:
#if IS_ENABLED(CONFIG_KVM)
        # cleanup critical section for program checks in sie64a
        OUTSIDE %r9,.Lsie_gmap,.Lsie_done,1f
        SIEEXIT
        lghi    %r10,_PIF_GUEST_FAULT
#endif
1:      tmhh    %r8,0x4000              # PER bit set in old PSW ?
        jnz     2f                      # -> enabled, can't be a double fault
        tm      __LC_PGM_ILC+3,0x80     # check for per exception
        jnz     .Lpgm_svcper            # -> single stepped svc
2:      CHECK_STACK __LC_SAVE_AREA_SYNC
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        # CHECK_VMAP_STACK branches to stack_overflow or 4f
        CHECK_VMAP_STACK __LC_SAVE_AREA_SYNC,4f
3:      BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        lg      %r15,__LC_KERNEL_STACK
4:      la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stg     %r10,__PT_FLAGS(%r11)
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_SYNC
        stmg    %r8,%r9,__PT_PSW(%r11)

        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        lgr     %r2,%r11
        brasl   %r14,__do_pgm_check
        tmhh    %r8,0x0001              # returning to user space?
        jno     .Lpgm_exit_kernel
        lctlg   %c1,%c1,__LC_USER_ASCE
        BPEXIT __TI_flags(%r12),_TIF_ISOLATE_BP
        stpt    __LC_EXIT_TIMER
.Lpgm_exit_kernel:
        mvc     __LC_RETURN_PSW(16),STACK_FRAME_OVERHEAD+__PT_PSW(%r15)
        lmg     %r0,%r15,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        b       __LC_RETURN_LPSWE

#
# single stepped system call
#
.Lpgm_svcper:
        mvc     __LC_RETURN_PSW(8),__LC_SVC_NEW_PSW
        larl    %r14,.Lsysc_per
        stg     %r14,__LC_RETURN_PSW+8
        lghi    %r14,1
        lpswe   __LC_RETURN_PSW         # branch to .Lsysc_per
ENDPROC(pgm_check_handler)

/*
 * Interrupt handler macro used for external and IO interrupts.
 */
.macro INT_HANDLER name,lc_old_psw,handler
ENTRY(\name)
        STCK    __LC_INT_CLOCK
        stpt    __LC_SYS_ENTER_TIMER
        BPOFF
        stmg    %r8,%r15,__LC_SAVE_AREA_ASYNC
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,\lc_old_psw
        tmhh    %r8,0x0001                      # interrupting from user ?
        jnz     1f
#if IS_ENABLED(CONFIG_KVM)
        OUTSIDE %r9,.Lsie_gmap,.Lsie_done,0f
        BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
        SIEEXIT
#endif
0:      CHECK_STACK __LC_SAVE_AREA_ASYNC
        aghi    %r15,-(STACK_FRAME_OVERHEAD + __PT_SIZE)
        j       2f
1:      BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        lg      %r15,__LC_KERNEL_STACK
2:      xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        mvc     __PT_R8(64,%r11),__LC_SAVE_AREA_ASYNC
        stmg    %r8,%r9,__PT_PSW(%r11)
        tmhh    %r8,0x0001              # coming from user space?
        jno     1f
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
1:      lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,\handler
        mvc     __LC_RETURN_PSW(16),__PT_PSW(%r11)
        tmhh    %r8,0x0001              # returning to user ?
        jno     2f
        lctlg   %c1,%c1,__LC_USER_ASCE
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
        stpt    __LC_EXIT_TIMER
2:      lmg     %r0,%r15,__PT_R0(%r11)
        b       __LC_RETURN_LPSWE
ENDPROC(\name)
.endm

INT_HANDLER ext_int_handler,__LC_EXT_OLD_PSW,do_ext_irq
INT_HANDLER io_int_handler,__LC_IO_OLD_PSW,do_io_irq

/*
 * Load idle PSW.
 */
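/*
 * %r2 is expected to point to the per-cpu idle data (the __MT_CYCLES_ENTER,
 * __CLOCK_IDLE_ENTER and __TIMER_IDLE_ENTER fields used below), %r3 holds
 * the idle PSW mask; the PSW built at __SF_EMPTY resumes at psw_idle_exit.
 */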
ENTRY(psw_idle)
        stg     %r14,(__SF_GPRS+8*8)(%r15)
        stg     %r3,__SF_EMPTY(%r15)
        larl    %r1,psw_idle_exit
        stg     %r1,__SF_EMPTY+8(%r15)
        larl    %r1,smp_cpu_mtid
        llgf    %r1,0(%r1)
        ltgr    %r1,%r1
        jz      .Lpsw_idle_stcctm
        .insn   rsy,0xeb0000000017,%r1,5,__MT_CYCLES_ENTER(%r2)
.Lpsw_idle_stcctm:
        oi      __LC_CPU_FLAGS+7,_CIF_ENABLED_WAIT
        BPON
        STCK    __CLOCK_IDLE_ENTER(%r2)
        stpt    __TIMER_IDLE_ENTER(%r2)
        lpswe   __SF_EMPTY(%r15)
.globl psw_idle_exit
psw_idle_exit:
        BR_EX   %r14
ENDPROC(psw_idle)

/*
 * Machine check handler routines
 */
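/*
 * Rough flow: revalidate the cpu timer, gprs and control registers from
 * their lowcore save areas, panic on unrecoverable machine check codes,
 * otherwise build a pt_regs on the machine check stack and call
 * s390_do_machine_check; if that returns nonzero, s390_handle_mcck is
 * called on the kernel stack before returning.
 */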
ENTRY(mcck_int_handler)
        STCK    __LC_MCCK_CLOCK
        BPOFF
        la      %r1,4095                # validate r1
        spt     __LC_CPU_TIMER_SAVE_AREA-4095(%r1)      # validate cpu timer
        lmg     %r0,%r15,__LC_GPREGS_SAVE_AREA-4095(%r1) # validate gprs
        lg      %r12,__LC_CURRENT
        lmg     %r8,%r9,__LC_MCK_OLD_PSW
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_SYSTEM_DAMAGE
        jo      .Lmcck_panic            # yes -> rest of mcck code invalid
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_CR_VALID
        jno     .Lmcck_panic            # control registers invalid -> panic
        la      %r14,4095
        lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r14) # validate ctl regs
        ptlb
        lghi    %r14,__LC_CPU_TIMER_SAVE_AREA
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_CPU_TIMER_VALID
        jo      3f
        la      %r14,__LC_SYS_ENTER_TIMER
        clc     0(8,%r14),__LC_EXIT_TIMER
        jl      1f
        la      %r14,__LC_EXIT_TIMER
1:      clc     0(8,%r14),__LC_LAST_UPDATE_TIMER
        jl      2f
        la      %r14,__LC_LAST_UPDATE_TIMER
2:      spt     0(%r14)
        mvc     __LC_MCCK_ENTER_TIMER(8),0(%r14)
3:      TSTMSK  __LC_MCCK_CODE,MCCK_CODE_PSW_MWP_VALID
        jno     .Lmcck_panic
        tmhh    %r8,0x0001              # interrupting from user ?
        jnz     6f
        TSTMSK  __LC_MCCK_CODE,MCCK_CODE_PSW_IA_VALID
        jno     .Lmcck_panic
#if IS_ENABLED(CONFIG_KVM)
        OUTSIDE %r9,.Lsie_gmap,.Lsie_done,6f
        OUTSIDE %r9,.Lsie_entry,.Lsie_skip,4f
        oi      __LC_CPU_FLAGS+7,_CIF_MCCK_GUEST
        j       5f
4:      CHKSTG  .Lmcck_panic
5:      larl    %r14,.Lstosm_tmp
        stosm   0(%r14),0x04            # turn dat on, keep irqs off
        BPENTER __SF_SIE_FLAGS(%r15),(_TIF_ISOLATE_BP|_TIF_ISOLATE_BP_GUEST)
        SIEEXIT
        j       .Lmcck_stack
#endif
6:      CHKSTG  .Lmcck_panic
        larl    %r14,.Lstosm_tmp
        stosm   0(%r14),0x04            # turn dat on, keep irqs off
        tmhh    %r8,0x0001              # interrupting from user ?
        jz      .Lmcck_stack
        BPENTER __TI_flags(%r12),_TIF_ISOLATE_BP
.Lmcck_stack:
        lg      %r15,__LC_MCCK_STACK
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stctg   %c1,%c1,__PT_CR1(%r11)
        lctlg   %c1,%c1,__LC_KERNEL_ASCE
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lghi    %r14,__LC_GPREGS_SAVE_AREA+64
        stmg    %r0,%r7,__PT_R0(%r11)
        # clear user controlled registers to prevent speculative use
        xgr     %r0,%r0
        xgr     %r1,%r1
        xgr     %r3,%r3
        xgr     %r4,%r4
        xgr     %r5,%r5
        xgr     %r6,%r6
        xgr     %r7,%r7
        xgr     %r10,%r10
        mvc     __PT_R8(64,%r11),0(%r14)
        stmg    %r8,%r9,__PT_PSW(%r11)
        xc      __PT_FLAGS(8,%r11),__PT_FLAGS(%r11)
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        brasl   %r14,s390_do_machine_check
        cghi    %r2,0
        je      .Lmcck_return
        lg      %r1,__LC_KERNEL_STACK   # switch to kernel stack
        mvc     STACK_FRAME_OVERHEAD(__PT_SIZE,%r1),0(%r11)
        xc      __SF_BACKCHAIN(8,%r1),__SF_BACKCHAIN(%r1)
        la      %r11,STACK_FRAME_OVERHEAD(%r1)
        lgr     %r15,%r1
        brasl   %r14,s390_handle_mcck
.Lmcck_return:
        lctlg   %c1,%c1,__PT_CR1(%r11)
        lmg     %r0,%r10,__PT_R0(%r11)
        mvc     __LC_RETURN_MCCK_PSW(16),__PT_PSW(%r11) # move return PSW
        tm      __LC_RETURN_MCCK_PSW+1,0x01 # returning to user ?
        jno     0f
        BPEXIT  __TI_flags(%r12),_TIF_ISOLATE_BP
        stpt    __LC_EXIT_TIMER
0:      lmg     %r11,%r15,__PT_R11(%r11)
        b       __LC_RETURN_MCCK_LPSWE

.Lmcck_panic:
        /*
         * Iterate over all possible CPU addresses in the range 0..0xffff
         * and stop each CPU using signal processor. Use compare and swap
         * to allow just one CPU-stopper and prevent concurrent CPUs from
         * stopping each other while leaving the others running.
         */
        lhi     %r5,0
        lhi     %r6,1
        larl    %r7,.Lstop_lock
        cs      %r5,%r6,0(%r7)          # single CPU-stopper only
        jnz     4f
        larl    %r7,.Lthis_cpu
        stap    0(%r7)                  # this CPU address
        lh      %r4,0(%r7)
        nilh    %r4,0
        lhi     %r0,1
        sll     %r0,16                  # CPU counter
        lhi     %r3,0                   # next CPU address
0:      cr      %r3,%r4
        je      2f
1:      sigp    %r1,%r3,SIGP_STOP       # stop next CPU
        brc     SIGP_CC_BUSY,1b
2:      ahi     %r3,1
        brct    %r0,0b
3:      sigp    %r1,%r4,SIGP_STOP       # stop this CPU
        brc     SIGP_CC_BUSY,3b
4:      j       4b
ENDPROC(mcck_int_handler)

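/*
 * Restart interrupt handler: loads the function pointer, parameter and
 * source cpu address from __LC_RESTART_FN/_DATA/_SOURCE and builds a
 * pt_regs area on the restart stack. If the source cpu address is not
 * negative, the source cpu is sigp-sensed until its status is stored;
 * then the function is called and this cpu stops itself via sigp stop.
 */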
ENTRY(restart_int_handler)
        ALTERNATIVE "", ".insn s,0xb2800000,_LPP_OFFSET", 40
        stg     %r15,__LC_SAVE_AREA_RESTART
        TSTMSK  __LC_RESTART_FLAGS,RESTART_FLAG_CTLREGS,4
        jz      0f
        la      %r15,4095
        lctlg   %c0,%c15,__LC_CREGS_SAVE_AREA-4095(%r15)
0:      larl    %r15,.Lstosm_tmp
        stosm   0(%r15),0x04                    # turn dat on, keep irqs off
        lg      %r15,__LC_RESTART_STACK
        xc      STACK_FRAME_OVERHEAD(__PT_SIZE,%r15),STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r14,STACK_FRAME_OVERHEAD+__PT_R0(%r15)
        mvc     STACK_FRAME_OVERHEAD+__PT_R15(8,%r15),__LC_SAVE_AREA_RESTART
        mvc     STACK_FRAME_OVERHEAD+__PT_PSW(16,%r15),__LC_RST_OLD_PSW
        xc      0(STACK_FRAME_OVERHEAD,%r15),0(%r15)
        lg      %r1,__LC_RESTART_FN             # load fn, parm & source cpu
        lg      %r2,__LC_RESTART_DATA
        lgf     %r3,__LC_RESTART_SOURCE
        ltgr    %r3,%r3                         # test source cpu address
        jm      1f                              # negative -> skip source stop
0:      sigp    %r4,%r3,SIGP_SENSE              # sigp sense to source cpu
        brc     10,0b                           # wait for status stored
1:      basr    %r14,%r1                        # call function
        stap    __SF_EMPTY(%r15)                # store cpu address
        llgh    %r3,__SF_EMPTY(%r15)
2:      sigp    %r4,%r3,SIGP_STOP               # sigp stop to current cpu
        brc     2,2b
3:      j       3b
ENDPROC(restart_int_handler)

        .section .kprobes.text, "ax"

#if defined(CONFIG_CHECK_STACK) || defined(CONFIG_VMAP_STACK)
/*
 * The synchronous or the asynchronous stack overflowed. We are dead.
 * No need to properly save the registers, we are going to panic anyway.
 * Set up a pt_regs so that show_trace can provide a good call trace.
 */
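/*
 * On entry %r14 holds the lowcore save area passed by CHECK_STACK or
 * CHECK_VMAP_STACK; registers %r8-%r15 are copied from there into the
 * pt_regs below.
 */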
ENTRY(stack_overflow)
        lg      %r15,__LC_NODAT_STACK   # change to panic stack
        la      %r11,STACK_FRAME_OVERHEAD(%r15)
        stmg    %r0,%r7,__PT_R0(%r11)
        stmg    %r8,%r9,__PT_PSW(%r11)
        mvc     __PT_R8(64,%r11),0(%r14)
        stg     %r10,__PT_ORIG_GPR2(%r11) # store last break to orig_gpr2
        xc      __SF_BACKCHAIN(8,%r15),__SF_BACKCHAIN(%r15)
        lgr     %r2,%r11                # pass pointer to pt_regs
        jg      kernel_stack_overflow
ENDPROC(stack_overflow)
#endif

        .section .data, "aw"
                .align  4
.Lstop_lock:    .long   0
.Lthis_cpu:     .short  0
.Lstosm_tmp:    .byte   0
        .section .rodata, "a"
#define SYSCALL(esame,emu)      .quad __s390x_ ## esame
        .globl  sys_call_table
sys_call_table:
#include "asm/syscall_table.h"
#undef SYSCALL

#ifdef CONFIG_COMPAT

#define SYSCALL(esame,emu)      .quad __s390_ ## emu
        .globl  sys_call_table_emu
sys_call_table_emu:
#include "asm/syscall_table.h"
#undef SYSCALL
#endif
