linux/arch/powerpc/kernel/entry_64.S
/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <asm/cache.h>
#include <asm/unistd.h>
#include <asm/processor.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/thread_info.h>
#include <asm/code-patching-asm.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/cputable.h>
#include <asm/firmware.h>
#include <asm/bug.h>
#include <asm/ptrace.h>
#include <asm/irqflags.h>
#include <asm/hw_irq.h>
#include <asm/context_tracking.h>
#include <asm/ppc-opcode.h>
#include <asm/barrier.h>
#include <asm/export.h>
#include <asm/asm-compat.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/exception-64s.h>
#else
#include <asm/exception-64e.h>
#endif
#include <asm/feature-fixups.h>
#include <asm/kup.h>

/*
 * System calls.
 */
        .section        ".text"

#ifdef CONFIG_PPC_BOOK3S_64

#define FLUSH_COUNT_CACHE       \
1:      nop;                    \
        patch_site 1b, patch__call_flush_branch_caches1; \
1:      nop;                    \
        patch_site 1b, patch__call_flush_branch_caches2; \
1:      nop;                    \
        patch_site 1b, patch__call_flush_branch_caches3
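/*
 * Each nop above is a patch site: when the branch cache flush mitigation
 * is enabled, the patch__call_flush_branch_caches* sites are live-patched
 * into "bl flush_branch_caches" calls (roughly the scheme driven by
 * toggle_branch_cache_flush() in arch/powerpc/kernel/security.c), and
 * patched back to nops when it is disabled.
 */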

.macro nops number
        .rept \number
        nop
        .endr
.endm

.balign 32
.global flush_branch_caches
flush_branch_caches:
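        /*
         * The patch sites below let the mitigation code tailor this
         * routine at runtime: patch__flush_link_stack_return can be
         * turned into a blr so that only the link stack is flushed,
         * and patch__flush_count_cache_return can likewise skip the
         * software count cache flush when a hardware-assisted flush
         * is available (roughly what security.c does when selecting
         * between the software and firmware-assisted flush types).
         */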
        /* Save LR into r9 */
        mflr    r9

        // Flush the link stack
        .rept 64
        bl      .+4
        .endr
        b       1f
        nops    6

        .balign 32
        /* Restore LR */
1:      mtlr    r9

        // If we're just flushing the link stack, return here
3:      nop
        patch_site 3b patch__flush_link_stack_return

        li      r9,0x7fff
        mtctr   r9

        PPC_BCCTR_FLUSH

2:      nop
        patch_site 2b patch__flush_count_cache_return

        nops    3

        .rept 278
        .balign 32
        PPC_BCCTR_FLUSH
        nops    7
        .endr

        blr
#else
#define FLUSH_COUNT_CACHE
#endif /* CONFIG_PPC_BOOK3S_64 */

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process, via interrupt_return.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this you'll have to change
 * the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
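/*
 * Roughly, the C-level call (see __switch_to() in
 * arch/powerpc/kernel/process.c) looks like:
 *
 *      last = _switch(old_thread, new_thread);
 *
 * so on return r3 holds the task_struct of the task we switched away
 * from, reconstructed below by subtracting THREAD from the old THREAD
 * pointer.
 */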
        .align  7
_GLOBAL(_switch)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-SWITCH_FRAME_SIZE(r1)
        /* r3-r13 are caller saved -- Cort */
        SAVE_NVGPRS(r1)
        std     r0,_NIP(r1)     /* Return to switch caller */
        mfcr    r23
        std     r23,_CCR(r1)
        std     r1,KSP(r3)      /* Set old stack pointer */

        kuap_check_amr r9, r10

        FLUSH_COUNT_CACHE       /* Clobbers r9, ctr */

        /*
         * On SMP kernels, care must be taken because a task may be
         * scheduled off CPUx and on to CPUy. Memory ordering must be
         * considered.
         *
         * Cacheable stores on CPUx will be visible when the task is
         * scheduled on CPUy by virtue of the core scheduler barriers
         * (see "Notes on Program-Order guarantees on SMP systems." in
         * kernel/sched/core.c).
         *
         * Uncacheable stores in the case of involuntary preemption must
         * be taken care of. The smp_mb__after_spinlock() in __schedule()
         * is implemented as hwsync on powerpc, which orders MMIO too. So
         * long as there is an hwsync in the context switch path, it will
         * be executed on the source CPU after the task has performed
         * all MMIO ops on that CPU, and on the destination CPU before the
         * task performs any MMIO ops there.
         */

        /*
         * The kernel context switch path must contain a spin_lock,
         * which contains larx/stcx, which will clear any reservation
         * of the task being switched.
         */
#ifdef CONFIG_PPC_BOOK3S
/* Cancel all explicit user streams as they will have no use after context
 * switch and will stop the HW from creating streams itself
 */
        DCBT_BOOK3S_STOP_ALL_STREAM_IDS(r6)
#endif

        addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
        std     r6,PACACURRENT(r13)     /* Set new 'current' */
#if defined(CONFIG_STACKPROTECTOR)
        ld      r6, TASK_CANARY(r6)
        std     r6, PACA_CANARY(r13)
#endif

        ld      r8,KSP(r4)      /* new stack pointer */
#ifdef CONFIG_PPC_BOOK3S_64
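        /*
         * On hash MMU the new kernel stack's SLB entry is bolted in below
         * so that kernel stack accesses never take an SLB miss; radix has
         * no SLB, so the MMU feature section just below branches straight
         * to 2f and skips all of this.
         */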
BEGIN_MMU_FTR_SECTION
        b       2f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
BEGIN_FTR_SECTION
        clrrdi  r6,r8,28        /* get its ESID */
        clrrdi  r9,r1,28        /* get current sp ESID */
FTR_SECTION_ELSE
        clrrdi  r6,r8,40        /* get its 1T ESID */
        clrrdi  r9,r1,40        /* get current sp 1T ESID */
ALT_MMU_FTR_SECTION_END_IFCLR(MMU_FTR_1T_SEGMENT)
        clrldi. r0,r6,2         /* is new ESID c00000000? */
        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
        cror    eq,4*cr1+eq,eq
        beq     2f              /* if yes, don't slbie it */

        /* Bolt in the new stack SLB entry */
        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
        oris    r0,r6,(SLB_ESID_V)@h
        ori     r0,r0,(SLB_NUM_BOLTED-1)@l
BEGIN_FTR_SECTION
        li      r9,MMU_SEGSIZE_1T       /* insert B field */
        oris    r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
        rldimi  r7,r9,SLB_VSID_SSIZE_SHIFT,0
END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)

        /* Update the last bolted SLB.  No write barriers are needed
         * here, provided we only update the current CPU's SLB shadow
         * buffer.
         */
        ld      r9,PACA_SLBSHADOWPTR(r13)
        li      r12,0
        std     r12,SLBSHADOW_STACKESID(r9)     /* Clear ESID */
        li      r12,SLBSHADOW_STACKVSID
        STDX_BE r7,r12,r9                       /* Save VSID */
        li      r12,SLBSHADOW_STACKESID
        STDX_BE r0,r12,r9                       /* Save ESID */

        /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
         * we have 1TB segments, the only CPUs known to have the errata
         * only support less than 1TB of system memory and we'll never
         * actually hit this code path.
         */

        isync
        slbie   r6
BEGIN_FTR_SECTION
        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
        slbmte  r7,r0
        isync
2:
#endif /* CONFIG_PPC_BOOK3S_64 */

        clrrdi  r7, r8, THREAD_SHIFT    /* base of new stack */
        /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
           because we don't need to leave the 288-byte ABI gap at the
           top of the kernel stack. */
        addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE

        /*
         * PMU interrupts in radix may come in here. They will use r1, not
         * PACAKSAVE, so this stack switch will not cause a problem. They
         * will store to the process stack, which may then be migrated to
         * another CPU. However the rq lock release on this CPU paired with
         * the rq lock acquire on the new CPU before the stack becomes
         * active on the new CPU, will order those stores.
         */
        mr      r1,r8           /* start using new stack pointer */
        std     r7,PACAKSAVE(r13)

        ld      r6,_CCR(r1)
        mtcrf   0xFF,r6

        /* r3-r13 are destroyed -- Cort */
        REST_NVGPRS(r1)

        /* convert old thread to its task_struct for return value */
        addi    r3,r3,-THREAD
        ld      r7,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r7
        addi    r1,r1,SWITCH_FRAME_SIZE
        blr

#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 *
 * In addition, we need to be in 32b mode, at least for now.
 *
 * Note: r3 is an input parameter to rtas, so don't trash it...
 */
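/*
 * Roughly, the C-side path is rtas_call() ending up in do_enter_rtas()
 * (arch/powerpc/kernel/rtas.c), which passes the real-mode (physical)
 * address of the rtas_args buffer in r3; it is handed through to RTAS
 * untouched.
 */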
_GLOBAL(enter_rtas)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space. */

        /* Because RTAS is running in 32b mode, it clobbers the high order half
         * of all registers that it saves.  We therefore save those registers
         * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
         */
        SAVE_GPR(2, r1)                 /* Save the TOC */
        SAVE_GPR(13, r1)                /* Save paca */
        SAVE_NVGPRS(r1)                 /* Save the non-volatiles */

        mfcr    r4
        std     r4,_CCR(r1)
        mfctr   r5
        std     r5,_CTR(r1)
        mfspr   r6,SPRN_XER
        std     r6,_XER(r1)
        mfdar   r7
        std     r7,_DAR(r1)
        mfdsisr r8
        std     r8,_DSISR(r1)

        /* Temporary workaround to clear CR until RTAS can be modified to
         * ignore all bits.
         */
        li      r0,0
        mtcr    r0

#ifdef CONFIG_BUG
        /* There is no way it is acceptable to get here with interrupts enabled,
         * check it with the asm equivalent of WARN_ON
         */
        lbz     r0,PACAIRQSOFTMASK(r13)
1:      tdeqi   r0,IRQS_ENABLED
        EMIT_WARN_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
#endif

        /* Hard-disable interrupts: rotate MSR_EE down to bit 0, mask it
         * off, rotate back, then write EE/RI with mtmsrd L=1.
         */
        mfmsr   r6
        rldicl  r7,r6,48,1      /* rotate MSR_EE (bit 48) to bit 0 and clear it */
        rotldi  r7,r7,16        /* rotate the remaining bits back into place */
        mtmsrd  r7,1            /* L=1: update only EE and RI */

        /* Unfortunately, the stack pointer and the MSR are also clobbered,
         * so they are saved in the PACA which allows us to restore
         * our original state after RTAS returns.
         */
        std     r1,PACAR1(r13)
        std     r6,PACASAVEDMSR(r13)

        /* Setup our real return addr */
        LOAD_REG_ADDR(r4,rtas_return_loc)
        clrldi  r4,r4,2                 /* convert to realmode address */
        mtlr    r4

        li      r0,0
        ori     r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
        andc    r0,r6,r0

        li      r9,1
        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
        ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI|MSR_LE
        andc    r6,r0,r9
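        /*
         * At this point r0 holds the MSR we switch to first (the current
         * MSR with EE/SE/BE/RI cleared, translation still on), and r6
         * holds the MSR RTAS will run with via SRR1: additionally SF,
         * IR/DR, FP/FE0/FE1 and LE cleared, i.e. 32-bit big-endian real
         * mode with interrupts off.
         */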
__enter_rtas:
        sync                            /* disable interrupts so SRR0/1 */
        mtmsrd  r0                      /* don't get trashed */

        LOAD_REG_ADDR(r4, rtas)
        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
        ld      r4,RTASBASE(r4)         /* get the rtas->base value */

        mtspr   SPRN_SRR0,r5
        mtspr   SPRN_SRR1,r6
        RFI_TO_KERNEL
        b       .       /* prevent speculative execution */

rtas_return_loc:
        FIXUP_ENDIAN

        /*
         * Clear RI and set SF before anything.
         */
        mfmsr   r6
        li      r0,MSR_RI
        andc    r6,r6,r0
        sldi    r0,r0,(MSR_SF_LG - MSR_RI_LG)
        or      r6,r6,r0
        sync
        mtmsrd  r6

        /* relocation is off at this point */
        GET_PACA(r4)
        clrldi  r4,r4,2                 /* convert to realmode address */

        /* Pick up the linked address of rtas_restore_regs from the
         * literal at 1f with a PC-relative load, since we are running
         * unrelocated here.
         */
        bcl     20,31,$+4
0:      mflr    r3
        ld      r3,(1f-0b)(r3)          /* get &rtas_restore_regs */

        ld      r1,PACAR1(r4)           /* Restore our SP */
        ld      r4,PACASAVEDMSR(r4)     /* Restore our MSR */

        mtspr   SPRN_SRR0,r3
        mtspr   SPRN_SRR1,r4
        RFI_TO_KERNEL
        b       .       /* prevent speculative execution */
_ASM_NOKPROBE_SYMBOL(__enter_rtas)
_ASM_NOKPROBE_SYMBOL(rtas_return_loc)

        .align  3
1:      .8byte  rtas_restore_regs

rtas_restore_regs:
        /* relocation is on at this point */
        REST_GPR(2, r1)                 /* Restore the TOC */
        REST_GPR(13, r1)                /* Restore paca */
        REST_NVGPRS(r1)                 /* Restore the non-volatiles */

        GET_PACA(r13)

        ld      r4,_CCR(r1)
        mtcr    r4
        ld      r5,_CTR(r1)
        mtctr   r5
        ld      r6,_XER(r1)
        mtspr   SPRN_XER,r6
        ld      r7,_DAR(r1)
        mtdar   r7
        ld      r8,_DSISR(r1)
        mtdsisr r8

        addi    r1,r1,SWITCH_FRAME_SIZE /* Unstack our frame */
        ld      r0,16(r1)               /* get return address */

        mtlr    r0
        blr                             /* return to caller */

#endif /* CONFIG_PPC_RTAS */

_GLOBAL(enter_prom)
        mflr    r0
        std     r0,16(r1)
        stdu    r1,-SWITCH_FRAME_SIZE(r1) /* Save SP and create stack space */

        /* Because PROM is running in 32b mode, it clobbers the high order half
         * of all registers that it saves.  We therefore save those registers
         * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
         */
        SAVE_GPR(2, r1)
        SAVE_GPR(13, r1)
        SAVE_NVGPRS(r1)
        mfcr    r10
        mfmsr   r11
        std     r10,_CCR(r1)
        std     r11,_MSR(r1)

        /* Put PROM address in SRR0 */
        mtsrr0  r4

        /* Setup our trampoline return addr in LR */
        bcl     20,31,$+4
0:      mflr    r4
        addi    r4,r4,(1f - 0b)
        mtlr    r4

        /* Prepare a 32-bit mode big endian MSR
         */
#ifdef CONFIG_PPC_BOOK3E
        rlwinm  r11,r11,0,1,31
        mtsrr1  r11
        rfi
#else /* CONFIG_PPC_BOOK3E */
        LOAD_REG_IMMEDIATE(r12, MSR_SF | MSR_LE)
        andc    r11,r11,r12
        mtsrr1  r11
        RFI_TO_KERNEL
#endif /* CONFIG_PPC_BOOK3E */

1:      /* Return from OF */
        FIXUP_ENDIAN

        /* Just make sure that the top 32 bits of r1 didn't get
         * corrupted by OF
         */
        rldicl  r1,r1,0,32

        /* Restore the MSR (back to 64 bits) */
        ld      r0,_MSR(r1)
        MTMSRD(r0)
        isync

        /* Restore other registers */
        REST_GPR(2, r1)
        REST_GPR(13, r1)
        REST_NVGPRS(r1)
        ld      r4,_CCR(r1)
        mtcr    r4

        addi    r1,r1,SWITCH_FRAME_SIZE
        ld      r0,16(r1)
        mtlr    r0
        blr
