/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ptrace.h>
#include <asm/export.h>
#include <asm/asm-405.h>
#include <asm/feature-fixups.h>
#include <asm/barrier.h>
#include <asm/kup.h>
#include <asm/bug.h>

#include "head_32.h"

/*
 * Align to 4k so that all functions modifying srr0/srr1 fit into a
 * single page, ensuring we cannot take a TLB miss between the
 * modification of srr0/srr1 and the associated rfi.
 */
        .align  12
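/*
 * (.align takes a power of two on PowerPC: 1 << 12 = 4096 bytes,
 * i.e. one 4k page, as required by the comment above.)
 */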

#ifdef CONFIG_BOOKE
        .globl  mcheck_transfer_to_handler
mcheck_transfer_to_handler:
        mfspr   r0,SPRN_DSRR0
        stw     r0,_DSRR0(r11)
        mfspr   r0,SPRN_DSRR1
        stw     r0,_DSRR1(r11)
        /* fall through */

        .globl  debug_transfer_to_handler
debug_transfer_to_handler:
        mfspr   r0,SPRN_CSRR0
        stw     r0,_CSRR0(r11)
        mfspr   r0,SPRN_CSRR1
        stw     r0,_CSRR1(r11)
        /* fall through */

        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
        mfspr   r0,SPRN_MAS0
        stw     r0,MAS0(r11)
        mfspr   r0,SPRN_MAS1
        stw     r0,MAS1(r11)
        mfspr   r0,SPRN_MAS2
        stw     r0,MAS2(r11)
        mfspr   r0,SPRN_MAS3
        stw     r0,MAS3(r11)
        mfspr   r0,SPRN_MAS6
        stw     r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
        mfspr   r0,SPRN_MAS7
        stw     r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
        mfspr   r0,SPRN_MMUCR
        stw     r0,MMUCR(r11)
#endif
        mfspr   r0,SPRN_SRR0
        stw     r0,_SRR0(r11)
        mfspr   r0,SPRN_SRR1
        stw     r0,_SRR1(r11)

        /* set the stack limit to the current stack */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,SAVED_KSP_LIMIT(r11)
        rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
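        /*
         * The rlwinm above keeps the high (32 - THREAD_SHIFT) bits of r1,
         * i.e. r0 = r1 & ~(THREAD_SIZE - 1): the base of the current
         * kernel stack, used as a temporary KSP_LIMIT.
         */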
        /* fall through */
#endif

#ifdef CONFIG_40x
        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
        lwz     r0,crit_r10@l(0)
        stw     r0,GPR10(r11)
        lwz     r0,crit_r11@l(0)
        stw     r0,GPR11(r11)
        mfspr   r0,SPRN_SRR0
        stw     r0,crit_srr0@l(0)
        mfspr   r0,SPRN_SRR1
        stw     r0,crit_srr1@l(0)

        /* set the stack limit to the current stack */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,saved_ksp_limit@l(0)
        rlwinm  r0,r1,0,0,(31 - THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
        .globl  transfer_to_handler_full
transfer_to_handler_full:
        SAVE_NVGPRS(r11)
        /* fall through */

        .globl  transfer_to_handler
transfer_to_handler:
        stw     r2,GPR2(r11)
        stw     r12,_NIP(r11)
        stw     r9,_MSR(r11)
        andi.   r2,r9,MSR_PR
        mfctr   r12
        mfspr   r2,SPRN_XER
        stw     r12,_CTR(r11)
        stw     r2,_XER(r11)
        mfspr   r12,SPRN_SPRG_THREAD
        beq     2f                      /* from kernel: skip the THREAD.regs fixup below */
        addi    r2, r12, -THREAD
        addi    r11,r1,STACK_FRAME_OVERHEAD
        stw     r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        /* Check to see if the dbcr0 register is set up to debug.  Use the
           internal debug mode bit to do this. */
        lwz     r12,THREAD_DBCR0(r12)
        andis.  r12,r12,DBCR0_IDM@h
#endif
        ACCOUNT_CPU_USER_ENTRY(r2, r11, r12)
#ifdef CONFIG_PPC_BOOK3S_32
        kuep_lock r11, r12
#endif
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        beq+    3f
        /* From user and task is ptraced - load up global dbcr0 */
        li      r12,-1                  /* clear all pending debug events */
        mtspr   SPRN_DBSR,r12
        lis     r11,global_dbcr0@ha
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        lwz     r9,TASK_CPU(r2)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        lwz     r12,0(r11)
        mtspr   SPRN_DBCR0,r12
        lwz     r12,4(r11)
        addi    r12,r12,-1
        stw     r12,4(r11)
#endif
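        /*
         * global_dbcr0 holds one 8-byte slot per CPU (hence the slwi by
         * 3 above): word 0 is a saved DBCR0 image, word 1 a use count
         * that load_dbcr0 (below) increments and this path decrements.
         */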

        b       3f

2:      /* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
        kuap_save_and_lock r11, r12, r9, r2, r0
        addi    r2, r12, -THREAD
        lwz     r9,KSP_LIMIT(r12)
        cmplw   r1,r9                   /* if r1 <= ksp_limit */
        ble-    stack_ovf               /* then the kernel stack overflowed */
5:
#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
        lwz     r12,TI_LOCAL_FLAGS(r2)
        mtcrf   0x01,r12
        bt-     31-TLF_NAPPING,4f
        bt-     31-TLF_SLEEPING,7f
#endif /* CONFIG_PPC_BOOK3S_32 || CONFIG_E500 */
        .globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
        mflr    r9
        tovirt(r2, r2)                  /* set r2 to current */
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
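        /*
         * The EXC_XFER macros (see head_32.h) reach here via "bl"
         * followed immediately by two words: the handler's virtual
         * address and the virtual return address, which is why both are
         * fetched through the saved LR (r9) above.
         */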
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
        mtspr   SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        /*
         * When tracing IRQ state (lockdep) we enable the MMU before we call
         * the IRQ tracing functions as they might access vmalloc space or
         * perform IOs for console output.
         *
         * To speed up the syscall path where interrupts stay on, let's check
         * first if we are changing the MSR value at all.
         */
        tophys(r12, r1)
        lwz     r12,_MSR(r12)
        andi.   r12,r12,MSR_EE
        bne     1f

        /* MSR isn't changing, just transition directly */
#endif
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */

#ifdef CONFIG_TRACE_IRQFLAGS
1:      /* MSR is changing, re-enable MMU so we can notify lockdep. We need to
         * keep interrupts disabled at this point otherwise we might risk
         * taking an interrupt before we tell lockdep they are enabled.
         */
        lis     r12,reenable_mmu@h
        ori     r12,r12,reenable_mmu@l
        LOAD_MSR_KERNEL(r0, MSR_KERNEL)
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r0
        SYNC
        RFI

reenable_mmu:
        /*
         * We save a bunch of GPRs:
         * r3 can be different from GPR3(r1) at this point, r9 and r11
         * contain the old MSR and handler address respectively,
         * r4 & r5 can contain page fault arguments that need to be passed
         * along as well. r12, CCR, CTR, XER etc... are left clobbered as
         * they aren't useful past this point (they aren't syscall arguments),
         * the rest is restored from the exception frame.
         */

        stwu    r1,-32(r1)
        stw     r9,8(r1)
        stw     r11,12(r1)
        stw     r3,16(r1)
        stw     r4,20(r1)
        stw     r5,24(r1)
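        /*
         * Temporary frame layout: 8(r1) = r9 (old MSR), 12(r1) = r11
         * (handler address), 16..24(r1) = r3..r5 (possible page fault
         * arguments), matching the reloads after the lockdep call below.
         */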

        /* If we are disabling interrupts (normal case), simply log it with
         * lockdep
         */
1:      bl      trace_hardirqs_off
2:      lwz     r5,24(r1)
        lwz     r4,20(r1)
        lwz     r3,16(r1)
        lwz     r11,12(r1)
        lwz     r9,8(r1)
        addi    r1,r1,32
        lwz     r0,GPR0(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        mtctr   r11
        mtlr    r9
        bctr                            /* jump to handler */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_PPC_BOOK3S_32) || defined(CONFIG_E500)
4:      rlwinm  r12,r12,0,~_TLF_NAPPING
        stw     r12,TI_LOCAL_FLAGS(r2)
        b       power_save_ppc32_restore

7:      rlwinm  r12,r12,0,~_TLF_SLEEPING
        stw     r12,TI_LOCAL_FLAGS(r2)
        lwz     r9,_MSR(r11)            /* if sleeping, clear MSR.EE */
        rlwinm  r9,r9,0,~MSR_EE
        lwz     r12,_LINK(r11)          /* and return to address in LR */
        kuap_restore r11, r2, r3, r4, r5
        b       fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
        lis     r12,_end@h
        ori     r12,r12,_end@l
        cmplw   r1,r12
        ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
        addi    r1,r1,init_thread_union@l
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        lis     r9,StackOverflow@ha
        addi    r9,r9,StackOverflow@l
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
        mtspr   SPRN_NRI, r0
#endif
        mtspr   SPRN_SRR0,r9
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI

#ifdef CONFIG_TRACE_IRQFLAGS
trace_syscall_entry_irq_off:
        /*
         * A syscall shouldn't happen while interrupts are disabled,
         * so warn about it here.
         */
0:      trap
        EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
        bl      trace_hardirqs_on

        /* Now enable for real */
        LOAD_MSR_KERNEL(r10, MSR_KERNEL | MSR_EE)
        mtmsr   r10

        REST_GPR(0, r1)
        REST_4GPRS(3, r1)
        REST_2GPRS(7, r1)
        b       DoSyscall
#endif /* CONFIG_TRACE_IRQFLAGS */

        .globl  transfer_to_syscall
transfer_to_syscall:
#ifdef CONFIG_TRACE_IRQFLAGS
        andi.   r12,r9,MSR_EE
        beq-    trace_syscall_entry_irq_off
#endif /* CONFIG_TRACE_IRQFLAGS */

/*
 * Handle a system call.
 */
        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
        .stabs  "entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
        stw     r3,ORIG_GPR3(r1)
        li      r12,0
        stw     r12,RESULT(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Make sure interrupts are enabled */
        mfmsr   r11
        andi.   r12,r11,MSR_EE
        /* If we came in with interrupts disabled, WARN and mark them
         * enabled for lockdep now */
0:      tweqi   r12, 0
        EMIT_BUG_ENTRY 0b,__FILE__,__LINE__, BUGFLAG_WARNING
#endif /* CONFIG_TRACE_IRQFLAGS */
        lwz     r11,TI_FLAGS(r2)
        andi.   r11,r11,_TIF_SYSCALL_DOTRACE
        bne-    syscall_dotrace
syscall_dotrace_cont:
        cmplwi  0,r0,NR_syscalls
        lis     r10,sys_call_table@h
        ori     r10,r10,sys_call_table@l
        slwi    r0,r0,2
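        /* sys_call_table entries are 4-byte pointers, so scale the
           syscall number by 4 before indexing */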
        bge-    66f

        barrier_nospec_asm
        /*
         * Prevent the load of the handler below (based on the user-passed
         * system call number) from being speculatively executed until the
         * test against NR_syscalls and the branch to 66f above have
         * committed.
         */

        lwzx    r10,r10,r0      /* Fetch system call handler [ptr] */
        mtlr    r10
        addi    r9,r1,STACK_FRAME_OVERHEAD
        PPC440EP_ERR42
        blrl                    /* Call handler */
        .globl  ret_from_syscall
ret_from_syscall:
#ifdef CONFIG_DEBUG_RSEQ
        /* Check whether the syscall is issued inside a restartable sequence */
        stw     r3,GPR3(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      rseq_syscall
        lwz     r3,GPR3(r1)
#endif
        mr      r6,r3
        /* disable interrupts so current_thread_info()->flags can't change */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
        /* Note: We don't bother telling lockdep about it */
        SYNC
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r2)
        li      r8,-MAX_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
        cmplw   0,r3,r8
        blt+    syscall_exit_cont
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)
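        /*
         * The 32-bit PowerPC syscall ABI reports failure by setting the
         * summary-overflow bit of CR0 (0x1000 << 16 == 0x10000000, set
         * above) and returning a positive errno in r3, which the neg
         * produces from the kernel's -errno return value.
         */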
syscall_exit_cont:
        lwz     r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* If we are going to return from the syscall with interrupts
         * off, we trace that here. It shouldn't normally happen.
         */
        andi.   r10,r8,MSR_EE
        bne+    1f
        stw     r3,GPR3(r1)
        bl      trace_hardirqs_off
        lwz     r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* If the process has its own DBCR0 value, load it up.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        bne-    2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
        lwarx   r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
        ACCOUNT_CPU_USER_EXIT(r2, r5, r7)
#ifdef CONFIG_PPC_BOOK3S_32
        kuep_unlock r5, r7
#endif
        kuap_check r2, r4
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
        mtlr    r4
        mtcr    r5
        lwz     r7,_NIP(r1)
        lwz     r2,GPR2(r1)
        lwz     r1,GPR1(r1)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
        mtspr   SPRN_NRI, r0
#endif
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        SYNC
        RFI
#ifdef CONFIG_44x
2:      li      r7,0
        iccci   r0,r0
        stw     r7,icache_44x_need_flush@l(r4)
        b       1b
#endif  /* CONFIG_44x */

66:     li      r3,-ENOSYS
        b       ret_from_syscall

        .globl  ret_from_fork
ret_from_fork:
        REST_NVGPRS(r1)
        bl      schedule_tail
        li      r3,0
        b       ret_from_syscall

        .globl  ret_from_kernel_thread
ret_from_kernel_thread:
        REST_NVGPRS(r1)
        bl      schedule_tail
        mtlr    r14
        mr      r3,r15
        PPC440EP_ERR42
        blrl
        li      r3,0
        b       ret_from_syscall

/* Traced system call support */
syscall_dotrace:
        SAVE_NVGPRS(r1)
        li      r0,0xc00
        stw     r0,_TRAP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_enter
        /*
         * Restore the argument registers, which the tracer may have
         * changed.  We use the return value of do_syscall_trace_enter
         * as the syscall number to look up in the table (r0).
         */
        mr      r0,r3
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        REST_NVGPRS(r1)

        cmplwi  r0,NR_syscalls
        /* Return code is already in r3 thanks to do_syscall_trace_enter() */
        bge-    ret_from_syscall
        b       syscall_dotrace_cont

syscall_exit_work:
        andi.   r0,r9,_TIF_RESTOREALL
        beq+    0f
        REST_NVGPRS(r1)
        b       2f
0:      cmplw   0,r3,r8
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)

1:      stw     r6,RESULT(r1)   /* Save result */
        stw     r3,GPR3(r1)     /* Update return value */
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set.  */

        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r2,TI_FLAGS
3:      lwarx   r8,0,r12
        andc    r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
        dcbt    0,r12
#endif
        stwcx.  r8,0,r12
        bne-    3b

4:      /* Anything which requires enabling interrupts? */
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
        beq     ret_from_except

        /* Re-enable interrupts. There is no need to trace that with
         * lockdep as we are supposed to have IRQs on at this point
         */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)

        /* Save NVGPRS if they're not saved already */
        lwz     r4,_TRAP(r1)
        andi.   r4,r4,1
        beq     5f
        SAVE_NVGPRS(r1)
        li      r4,0xc00
        stw     r4,_TRAP(r1)
5:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_leave
        b       ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
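/*
 * The low bit of the trap word in the exception frame records that only
 * the volatile registers were saved; rlwinm r0,r0,0,0,30 keeps mask
 * bits 0..30 (big-endian numbering), clearing that bit to mark the
 * register set as full.
 */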
        .globl  ppc_fork
ppc_fork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_fork

        .globl  ppc_vfork
ppc_vfork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_vfork

        .globl  ppc_clone
ppc_clone:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_clone

        .globl  ppc_swapcontext
ppc_swapcontext:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
        .globl  handle_page_fault
handle_page_fault:
        stw     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
#ifdef CONFIG_PPC_BOOK3S_32
        andis.  r0,r5,DSISR_DABRMATCH@h
        bne-    handle_dabr_fault
#endif
        bl      do_page_fault
        cmpwi   r3,0
        beq+    ret_from_except
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        clrrwi  r0,r0,1
        stw     r0,_TRAP(r1)
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      bad_page_fault
        b       ret_from_except_full

#ifdef CONFIG_PPC_BOOK3S_32
        /* We have a data breakpoint exception - handle it */
handle_dabr_fault:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        clrrwi  r0,r0,1
        stw     r0,_TRAP(r1)
        bl      do_break
        b       ret_from_except_full
#endif

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
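/*
 * Sketch of the protocol implemented below: the outgoing task's
 * non-volatile GPRs, MSR and CR are saved in a frame on its own stack
 * and the resulting r1 is stored in its THREAD.ksp; the incoming
 * task's r1 is then loaded from its THREAD.ksp and the same frame
 * layout is popped, so control resumes at _switch's caller in the new
 * task, with r3 carrying the old 'current' as the return value.
 */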
_GLOBAL(_switch)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        /* r3-r12 are caller saved -- Cort */
        SAVE_NVGPRS(r1)
        stw     r0,_NIP(r1)     /* Return to switch caller */
        mfmsr   r11
        li      r0,MSR_FP       /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h /* Disable altivec */
        mfspr   r12,SPRN_VRSAVE /* save vrsave register value */
        stw     r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_SPE@h  /* Disable SPE */
        mfspr   r12,SPRN_SPEFSCR /* save spefscr register value */
        stw     r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
        and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
        beq+    1f
        andc    r11,r11,r0
        MTMSRD(r11)
        isync
1:      stw     r11,_MSR(r1)
        mfcr    r10
        stw     r10,_CCR(r1)
        stw     r1,KSP(r3)      /* Set old stack pointer */

        kuap_check r2, r4
#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        tophys(r0,r4)
        mtspr   SPRN_SPRG_THREAD,r0     /* Update current THREAD phys addr */
        lwz     r1,KSP(r4)      /* Load new stack pointer */

        /* save the old current 'last' for return value */
        mr      r3,r2
        addi    r2,r4,-THREAD   /* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_VRSAVE(r2)
        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_SPEFSCR(r2)
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
        REST_NVGPRS(r1)

        lwz     r4,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r4
        addi    r1,r1,INT_FRAME_SIZE
        blr

        .globl  fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check for recoverable interrupt */
        beq     1f                      /* if not, we've got problems */
#endif

2:      REST_4GPRS(3, r11)
        lwz     r10,_CCR(r11)
        REST_GPR(1, r11)
        mtcr    r10
        lwz     r10,_LINK(r11)
        mtlr    r10
        /* Clear the exception_marker on the stack to avoid confusing stacktrace */
        li      r10, 0
        stw     r10, 8(r11)
        REST_GPR(10, r11)
#if defined(CONFIG_PPC_8xx) && defined(CONFIG_PERF_EVENTS)
        mtspr   SPRN_NRI, r0
#endif
        mtspr   SPRN_SRR1,r9
        mtspr   SPRN_SRR0,r12
        REST_GPR(9, r11)
        REST_GPR(12, r11)
        lwz     r11,GPR11(r11)
        SYNC
        RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:      lis     r3,exc_exit_restart_end@ha
        addi    r3,r3,exc_exit_restart_end@l
        cmplw   r12,r3
        bge     3f
        lis     r4,exc_exit_restart@ha
        addi    r4,r4,exc_exit_restart@l
        cmplw   r12,r4
        blt     3f
        lis     r3,fee_restarts@ha
        tophys(r3,r3)
        lwz     r5,fee_restarts@l(r3)
        addi    r5,r5,1
        stw     r5,fee_restarts@l(r3)
        mr      r12,r4          /* restart at exc_exit_restart */
        b       2b

        .section .bss
        .align  2
fee_restarts:
        .space  4
        .previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
        b       2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
        li      r10,-1
        stw     r10,_TRAP(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r10,MSR_KERNEL@h
        ori     r10,r10,MSR_KERNEL@l
        bl      transfer_to_handler_full
        .long   unrecoverable_exception
        .long   ret_from_except
#endif

        .globl  ret_from_except_full
ret_from_except_full:
        REST_NVGPRS(r1)
        /* fall through */

        .globl  ret_from_except
ret_from_except:
        /* Hard-disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt. */
        /* Note: We don't bother telling lockdep about it */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC                    /* Some chip revs have problems here... */
        MTMSRD(r10)             /* disable interrupts */

        lwz     r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r0,r3,MSR_PR
        beq     resume_kernel

user_exc_return:                /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        lwz     r9,TI_FLAGS(r2)
        andi.   r0,r9,_TIF_USER_WORK_MASK
        bne     do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* Check whether this process has its own DBCR0 value.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif
        ACCOUNT_CPU_USER_EXIT(r2, r10, r11)
#ifdef CONFIG_PPC_BOOK3S_32
        kuep_unlock     r10, r11
#endif

        b       restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
        lwz     r8,TI_FLAGS(r2)
        andis.  r0,r8,_TIF_EMULATE_STACK_STORE@h
        beq+    1f

        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */

        lwz     r3,GPR1(r1)
        subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception frame */
        mr      r4,r1                   /* src:  current exception frame */
        mr      r1,r3                   /* Reroute the trampoline frame to r1 */

        /* Copy from the original to the trampoline. */
        li      r5,INT_FRAME_SIZE/4     /* size: INT_FRAME_SIZE */
        li      r6,0                    /* start offset: 0 */
        mtctr   r5
2:      lwzx    r0,r6,r4
        stwx    r0,r6,r3
        addi    r6,r6,4
        bdnz    2b

        /* Do real store operation to complete stwu */
        lwz     r5,GPR1(r1)
        stw     r8,0(r5)

        /* Clear _TIF_EMULATE_STACK_STORE flag */
        lis     r11,_TIF_EMULATE_STACK_STORE@h
        addi    r5,r2,TI_FLAGS
0:      lwarx   r8,0,r5
        andc    r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
        dcbt    0,r5
#endif
        stwcx.  r8,0,r5
        bne-    0b
1:

#ifdef CONFIG_PREEMPT
        /* check current_thread_info->preempt_count */
        lwz     r0,TI_PREEMPT(r2)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
        bne     restore_kuap
        andi.   r8,r8,_TIF_NEED_RESCHED
        beq+    restore_kuap
        lwz     r3,_MSR(r1)
        andi.   r0,r3,MSR_EE    /* interrupts off? */
        beq     restore_kuap    /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep thinks irqs are enabled, we need to call
         * preempt_schedule_irq with IRQs off, so we inform lockdep
         * now that we -did- turn them off already
         */
        bl      trace_hardirqs_off
#endif
        bl      preempt_schedule_irq
#ifdef CONFIG_TRACE_IRQFLAGS
        /* And now, to properly rebalance the above, we tell lockdep they
         * are being turned back on, which will happen when we return
         */
        bl      trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */
restore_kuap:
        kuap_restore r1, r2, r9, r10, r0

        /* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
        b       1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        beq+    1f
        li      r6,0
        iccci   r0,r0
        stw     r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

        lwz     r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep doesn't know that IRQs are temporarily turned off in this
         * assembly code while peeking at TI_FLAGS() and such. However, we
         * need to inform it if the exception turned interrupts off and we
         * are about to turn them back on.
         */
        andi.   r10,r9,MSR_EE
        beq     1f
        stwu    r1,-32(r1)
        mflr    r0
        stw     r0,4(r1)
        bl      trace_hardirqs_on
        addi    r1, r1, 32
        lwz     r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        REST_4GPRS(3, r1)
        REST_2GPRS(7, r1)

        lwz     r10,_XER(r1)
        lwz     r11,_CTR(r1)
        mtspr   SPRN_XER,r10
        mtctr   r11

        PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
        lwarx   r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check if this exception occurred */
        beql    nonrecoverable          /* at a bad place (MSR:RI = 0) */

        lwz     r10,_CCR(r1)
        lwz     r11,_LINK(r1)
        mtcrf   0xFF,r10
        mtlr    r11

        /* Clear the exception_marker on the stack to avoid confusing stacktrace */
        li      r10, 0
        stw     r10, 8(r1)
        /*
         * Once we put values in SRR0 and SRR1, we are in a state
         * where exceptions are not recoverable, since taking an
         * exception will trash SRR0 and SRR1.  Therefore we clear the
         * MSR:RI bit to indicate this.  If we do take an exception,
         * we can't return to the point of the exception but we
         * can restart the exception exit path at the label
         * exc_exit_restart below.  -- paulus
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
        SYNC
        MTMSRD(r10)             /* clear the RI bit */
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r12,_NIP(r1)
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r9
        REST_4GPRS(9, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        SYNC
        RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
        /*
         * This is a bit different on 4xx/Book-E because it doesn't have
         * the RI bit in the MSR.
         * The TLB miss handler checks if we have interrupted
         * the exception exit path and restarts it if so
         * (well maybe one day it will... :).
         */
        lwz     r11,_LINK(r1)
        mtlr    r11
        lwz     r10,_CCR(r1)
        mtcrf   0xff,r10
        /* Clear the exception_marker on the stack to avoid confusing stacktrace */
        li      r10, 0
        stw     r10, 8(r1)
        REST_2GPRS(9, r1)
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r11,_NIP(r1)
        lwz     r12,_MSR(r1)
exc_exit_start:
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        REST_2GPRS(11, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        PPC405_ERR77_SYNC
        rfi
        b       .                       /* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR                                             \
        /* avoid any possible TLB misses here by turning off MSR.DR, we     \
         * assume the instructions here are mapped by a pinned TLB entry */ \
        li      r10,MSR_IR;                                                 \
        mtmsr   r10;                                                        \
        isync;                                                              \
        tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif
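/*
 * Note: the macro above loads MSR with only MSR_IR set, so data
 * relocation (and external interrupts) are turned off while instruction
 * relocation stays on, which is why r1 must be converted to a physical
 * address with tophys().
 */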

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)     \
        REST_NVGPRS(r1);                                                \
        lwz     r3,_MSR(r1);                                            \
        andi.   r3,r3,MSR_PR;                                           \
        LOAD_MSR_KERNEL(r10,MSR_KERNEL);                                \
        bne     user_exc_return;                                        \
        lwz     r0,GPR0(r1);                                            \
        lwz     r2,GPR2(r1);                                            \
        REST_4GPRS(3, r1);                                              \
        REST_2GPRS(7, r1);                                              \
        lwz     r10,_XER(r1);                                           \
        lwz     r11,_CTR(r1);                                           \
        mtspr   SPRN_XER,r10;                                           \
        mtctr   r11;                                                    \
        PPC405_ERR77(0,r1);                                             \
        stwcx.  r0,0,r1;                /* to clear the reservation */  \
        lwz     r11,_LINK(r1);                                          \
        mtlr    r11;                                                    \
        lwz     r10,_CCR(r1);                                           \
        mtcrf   0xff,r10;                                               \
        PPC_40x_TURN_OFF_MSR_DR;                                        \
        lwz     r9,_DEAR(r1);                                           \
        lwz     r10,_ESR(r1);                                           \
        mtspr   SPRN_DEAR,r9;                                           \
        mtspr   SPRN_ESR,r10;                                           \
        lwz     r11,_NIP(r1);                                           \
        lwz     r12,_MSR(r1);                                           \
        mtspr   exc_lvl_srr0,r11;                                       \
        mtspr   exc_lvl_srr1,r12;                                       \
        lwz     r9,GPR9(r1);                                            \
        lwz     r12,GPR12(r1);                                          \
        lwz     r10,GPR10(r1);                                          \
        lwz     r11,GPR11(r1);                                          \
        lwz     r1,GPR1(r1);                                            \
        PPC405_ERR77_SYNC;                                              \
        exc_lvl_rfi;                                                    \
        b       .;              /* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)                        \
        lwz     r9,_##exc_lvl_srr0(r1);                                 \
        lwz     r10,_##exc_lvl_srr1(r1);                                \
        mtspr   SPRN_##exc_lvl_srr0,r9;                                 \
        mtspr   SPRN_##exc_lvl_srr1,r10;
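/*
 * For example, RESTORE_xSRR(CSRR0,CSRR1) expands via token pasting to:
 *
 *      lwz     r9,_CSRR0(r1)
 *      lwz     r10,_CSRR1(r1)
 *      mtspr   SPRN_CSRR0,r9
 *      mtspr   SPRN_CSRR1,r10
 */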

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7                                                    \
        lwz     r11,MAS7(r1);                                           \
        mtspr   SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS                                                \
        lwz     r9,MAS0(r1);                                            \
        lwz     r10,MAS1(r1);                                           \
        lwz     r11,MAS2(r1);                                           \
        mtspr   SPRN_MAS0,r9;                                           \
        lwz     r9,MAS3(r1);                                            \
        mtspr   SPRN_MAS1,r10;                                          \
        lwz     r10,MAS6(r1);                                           \
        mtspr   SPRN_MAS2,r11;                                          \
        mtspr   SPRN_MAS3,r9;                                           \
        mtspr   SPRN_MAS6,r10;                                          \
        RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS                                                \
        lwz     r9,MMUCR(r1);                                           \
        mtspr   SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lis     r10,saved_ksp_limit@ha;
        lwz     r10,saved_ksp_limit@l(r10);
        tovirt(r9,r9);
        stw     r10,KSP_LIMIT(r9)
        lis     r9,crit_srr0@ha;
        lwz     r9,crit_srr0@l(r9);
        lis     r10,crit_srr1@ha;
        lwz     r10,crit_srr1@l(r10);
        mtspr   SPRN_SRR0,r9;
        mtspr   SPRN_SRR1,r10;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

        .globl  ret_from_debug_exc
ret_from_debug_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

        .globl  ret_from_mcheck_exc
ret_from_mcheck_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_xSRR(DSRR0,DSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced, having first
 * saved away the global DBCR0.  Note that r0 holds the dbcr0 value to
 * set upon entry to this routine.
 */
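/*
 * Debug exceptions are disabled (MSR[DE] cleared) around the DBCR0 and
 * DBSR updates below so a debug event cannot be taken half-way through.
 */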
load_dbcr0:
        mfmsr   r10             /* first disable debug exceptions */
        rlwinm  r10,r10,0,~MSR_DE
        mtmsr   r10
        isync
        mfspr   r10,SPRN_DBCR0
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        lwz     r9,TASK_CPU(r2)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        stw     r10,0(r11)
        mtspr   SPRN_DBCR0,r0
        lwz     r10,4(r11)
        addi    r10,r10,1
        stw     r10,4(r11)
        li      r11,-1
        mtspr   SPRN_DBSR,r11   /* clear all pending debug events */
        blr

        .section .bss
        .align  4
        .global global_dbcr0
global_dbcr0:
        .space  8*NR_CPUS
        .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:                        /* r10 contains MSR_KERNEL here */
        andi.   r0,r9,_TIF_NEED_RESCHED
        beq     do_user_signal

do_resched:                     /* r10 contains MSR_KERNEL here */
#ifdef CONFIG_TRACE_IRQFLAGS
        bl      trace_hardirqs_on
        mfmsr   r10
#endif
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        bl      schedule
recheck:
        /* Note: we don't tell lockdep that we are disabling interrupts
         * again either.  These brief disable/enable cycles used to peek
         * at TI_FLAGS aren't advertised.
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)             /* disable interrupts */
        lwz     r9,TI_FLAGS(r2)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
        andi.   r0,r9,_TIF_USER_WORK_MASK
        beq     restore_user
do_user_signal:                 /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        /* save r13-r31 in the exception frame, if not already done */
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     2f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
2:      addi    r3,r1,STACK_FRAME_OVERHEAD
        mr      r4,r9
        bl      do_notify_resume
        REST_NVGPRS(r1)
        b       recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
        lis     r10,exc_exit_restart_end@ha
        addi    r10,r10,exc_exit_restart_end@l
        cmplw   r12,r10
        bge     3f
        lis     r11,exc_exit_restart@ha
        addi    r11,r11,exc_exit_restart@l
        cmplw   r12,r11
        blt     3f
        lis     r10,ee_restarts@ha
        lwz     r12,ee_restarts@l(r10)
        addi    r12,r12,1
        stw     r12,ee_restarts@l(r10)
        mr      r12,r11         /* restart at exc_exit_restart */
        blr
3:      /* OK, we can't recover, kill this process */
        /* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
        blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     5f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
5:      mfspr   r2,SPRN_SPRG_THREAD
        addi    r2,r2,-THREAD
        tovirt(r2,r2)                   /* set back r2 to current */
4:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      unrecoverable_exception
        /* shouldn't return */
        b       4b

        .section .bss
        .align  2
ee_restarts:
        .space  4
        .previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
        tophys(r7,r1)
        lwz     r8,RTASENTRY(r4)
        lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
        LOAD_MSR_KERNEL(r0,MSR_KERNEL)
        SYNC                    /* disable interrupts so SRR0/1 */
        MTMSRD(r0)              /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
        mtlr    r6
        stw     r7, THREAD + RTAS_SP(r2)
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI
1:      tophys(r9,r1)
        lwz     r8,INT_FRAME_SIZE+4(r9) /* get return address */
        lwz     r9,8(r9)        /* original msr value */
        addi    r1,r1,INT_FRAME_SIZE
        li      r0,0
        tophys(r7, r2)
        stw     r0, THREAD + RTAS_SP(r7)
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI                     /* return to caller */

        .globl  machine_check_in_rtas
machine_check_in_rtas:
        twi     31,0,0
        /* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */