linux/arch/powerpc/kernel/entry_32.S
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)   lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)   li r,(x)
#endif

#ifdef CONFIG_BOOKE
        .globl  mcheck_transfer_to_handler
mcheck_transfer_to_handler:
        mfspr   r0,SPRN_DSRR0
        stw     r0,_DSRR0(r11)
        mfspr   r0,SPRN_DSRR1
        stw     r0,_DSRR1(r11)
        /* fall through */

        .globl  debug_transfer_to_handler
debug_transfer_to_handler:
        mfspr   r0,SPRN_CSRR0
        stw     r0,_CSRR0(r11)
        mfspr   r0,SPRN_CSRR1
        stw     r0,_CSRR1(r11)
        /* fall through */

        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
        mfspr   r0,SPRN_MAS0
        stw     r0,MAS0(r11)
        mfspr   r0,SPRN_MAS1
        stw     r0,MAS1(r11)
        mfspr   r0,SPRN_MAS2
        stw     r0,MAS2(r11)
        mfspr   r0,SPRN_MAS3
        stw     r0,MAS3(r11)
        mfspr   r0,SPRN_MAS6
        stw     r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
        mfspr   r0,SPRN_MAS7
        stw     r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
        mfspr   r0,SPRN_MMUCR
        stw     r0,MMUCR(r11)
#endif
        mfspr   r0,SPRN_SRR0
        stw     r0,_SRR0(r11)
        mfspr   r0,SPRN_SRR1
        stw     r0,_SRR1(r11)

        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,SAVED_KSP_LIMIT(r11)
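        /*
         * Temporarily point KSP_LIMIT into the stack we are actually
         * running on: the rlwimi below keeps the low THREAD_SHIFT bits
         * of the old limit but takes the stack-page bits from r1.
         * SAVED_KSP_LIMIT (saved just above) is put back by the
         * matching ret_from_*_exc paths.
         */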
        rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
#endif

#ifdef CONFIG_40x
        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
        lwz     r0,crit_r10@l(0)
        stw     r0,GPR10(r11)
        lwz     r0,crit_r11@l(0)
        stw     r0,GPR11(r11)
        mfspr   r0,SPRN_SRR0
        stw     r0,crit_srr0@l(0)
        mfspr   r0,SPRN_SRR1
        stw     r0,crit_srr1@l(0)

        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,saved_ksp_limit@l(0)
        rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
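/*
 * Register conventions on arrival here (as set up by the exception
 * prologs in head_*.S): r11 = exception frame to fill (physical
 * address), r12 = saved NIP (from SRR0), r9 = saved MSR (from SRR1),
 * r10 = MSR value the handler should run with, and LR points at a
 * two-word table holding the handler address and the address to
 * return to when the handler is done.
 */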
        .globl  transfer_to_handler_full
transfer_to_handler_full:
        SAVE_NVGPRS(r11)
        /* fall through */

        .globl  transfer_to_handler
transfer_to_handler:
        stw     r2,GPR2(r11)
        stw     r12,_NIP(r11)
        stw     r9,_MSR(r11)
        andi.   r2,r9,MSR_PR
        mfctr   r12
        mfspr   r2,SPRN_XER
        stw     r12,_CTR(r11)
        stw     r2,_XER(r11)
        mfspr   r12,SPRN_SPRG_THREAD
        addi    r2,r12,-THREAD
        tovirt(r2,r2)                   /* set r2 to current */
        beq     2f                      /* if from user, fix up THREAD.regs */
        addi    r11,r1,STACK_FRAME_OVERHEAD
        stw     r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        /* Check to see if the dbcr0 register is set up to debug.  Use the
           internal debug mode bit to do this. */
        lwz     r12,THREAD_DBCR0(r12)
        andis.  r12,r12,DBCR0_IDM@h
        beq+    3f
        /* From user and task is ptraced - load up global dbcr0 */
        li      r12,-1                  /* clear all pending debug events */
        mtspr   SPRN_DBSR,r12
        lis     r11,global_dbcr0@ha
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_CPU(r9)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        lwz     r12,0(r11)
        mtspr   SPRN_DBCR0,r12
        lwz     r12,4(r11)
        addi    r12,r12,-1
        stw     r12,4(r11)
#endif
        b       3f

2:      /* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
        lwz     r9,KSP_LIMIT(r12)
        cmplw   r1,r9                   /* if r1 <= ksp_limit */
        ble-    stack_ovf               /* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
        rlwinm  r9,r1,0,0,31-THREAD_SHIFT
        tophys(r9,r9)                   /* check local flags */
        lwz     r12,TI_LOCAL_FLAGS(r9)
        mtcrf   0x01,r12
        bt-     31-TLF_NAPPING,4f
        bt-     31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
        .globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
        mflr    r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
#ifdef CONFIG_TRACE_IRQFLAGS
        lis     r12,reenable_mmu@h
        ori     r12,r12,reenable_mmu@l
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI
reenable_mmu:                           /* re-enable mmu so we can */
        mfmsr   r10
        lwz     r12,_MSR(r1)
        xor     r10,r10,r12
        andi.   r10,r10,MSR_EE          /* Did EE change? */
        beq     1f

        /* Save handler and return address into the 2 unused words
         * of the STACK_FRAME_OVERHEAD (sneak sneak sneak). Everything
         * else can be recovered from the pt_regs except r3 which for
         * normal interrupts has been set to pt_regs and for syscalls
         * is an argument, so we temporarily use ORIG_GPR3 to save it
         */
        stw     r9,8(r1)
        stw     r11,12(r1)
        stw     r3,ORIG_GPR3(r1)
        bl      trace_hardirqs_off
        lwz     r0,GPR0(r1)
        lwz     r3,ORIG_GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        lwz     r9,8(r1)
        lwz     r11,12(r1)
1:      mtctr   r11
        mtlr    r9
        bctr                            /* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
4:      rlwinm  r12,r12,0,~_TLF_NAPPING
        stw     r12,TI_LOCAL_FLAGS(r9)
        b       power_save_ppc32_restore

7:      rlwinm  r12,r12,0,~_TLF_SLEEPING
        stw     r12,TI_LOCAL_FLAGS(r9)
        lwz     r9,_MSR(r11)            /* if sleeping, clear MSR.EE */
        rlwinm  r9,r9,0,~MSR_EE
        lwz     r12,_LINK(r11)          /* and return to address in LR */
        b       fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
        lis     r12,_end@h
        ori     r12,r12,_end@l
        cmplw   r1,r12
        ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
        addi    r1,r1,init_thread_union@l
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        lis     r9,StackOverflow@ha
        addi    r9,r9,StackOverflow@l
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        FIX_SRR1(r10,r12)
        mtspr   SPRN_SRR0,r9
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI

/*
 * Handle a system call.
 */
        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
        .stabs  "entry_32.S",N_SO,0,0,0f
0:

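/*
 * PPC32 syscall convention as consumed here: r0 carries the syscall
 * number and r3-r8 carry up to six arguments; the result is returned
 * in r3, with failure reported by setting the SO bit of CR0 (cleared
 * on entry below, set again with the positive errno on the exit
 * paths).
 */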
_GLOBAL(DoSyscall)
        stw     r3,ORIG_GPR3(r1)
        li      r12,0
        stw     r12,RESULT(r1)
        lwz     r11,_CCR(r1)    /* Clear SO bit in CR */
        rlwinm  r11,r11,0,4,2
        stw     r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
        bl      do_show_syscall
#endif /* SHOW_SYSCALLS */
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Return from syscalls can (and generally will) hard enable
         * interrupts. You aren't supposed to call a syscall with
         * interrupts disabled in the first place. However, to ensure
         * that we get it right vs. lockdep if it happens, we force
         * that hard enable here with appropriate tracing if we see
         * that we have been called with interrupts off
         */
        mfmsr   r11
        andi.   r12,r11,MSR_EE
        bne+    1f
        /* We came in with interrupts disabled, we enable them now */
        bl      trace_hardirqs_on
        mfmsr   r11
        lwz     r0,GPR0(r1)
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        ori     r11,r11,MSR_EE
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        mtmsr   r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
        rlwinm  r10,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
        lwz     r11,TI_FLAGS(r10)
        andi.   r11,r11,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
syscall_dotrace_cont:
        cmplwi  0,r0,NR_syscalls
        lis     r10,sys_call_table@h
        ori     r10,r10,sys_call_table@l
        slwi    r0,r0,2
        bge-    66f
        lwzx    r10,r10,r0      /* Fetch system call handler [ptr] */
        mtlr    r10
        addi    r9,r1,STACK_FRAME_OVERHEAD
        PPC440EP_ERR42
        blrl                    /* Call handler */
        .globl  ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
        bl      do_show_syscall_exit
#endif
        mr      r6,r3
        rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
        /* disable interrupts so current_thread_info()->flags can't change */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
        /* Note: We don't bother telling lockdep about it */
        SYNC
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r12)
        li      r8,-_LAST_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
        cmplw   0,r3,r8
        blt+    syscall_exit_cont
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)
syscall_exit_cont:
        lwz     r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* If we are going to return from the syscall with interrupts
         * off, we trace that here. It shouldn't happen, but we want
         * to catch the bugger if it does, right?
         */
        andi.   r10,r8,MSR_EE
        bne+    1f
        stw     r3,GPR3(r1)
        bl      trace_hardirqs_off
        lwz     r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* If the process has its own DBCR0 value, load it up.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif
#ifdef CONFIG_44x
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        bne-    2f
1:
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
        lwarx   r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
        mtlr    r4
        mtcr    r5
        lwz     r7,_NIP(r1)
        FIX_SRR1(r8, r0)
        lwz     r2,GPR2(r1)
        lwz     r1,GPR1(r1)
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        SYNC
        RFI
#ifdef CONFIG_44x
2:      li      r7,0
        iccci   r0,r0
        stw     r7,icache_44x_need_flush@l(r4)
        b       1b
#endif  /* CONFIG_44x */

66:     li      r3,-ENOSYS
        b       ret_from_syscall

        .globl  ret_from_fork
ret_from_fork:
        REST_NVGPRS(r1)
        bl      schedule_tail
        li      r3,0
        b       ret_from_syscall

/* Traced system call support */
syscall_dotrace:
        SAVE_NVGPRS(r1)
        li      r0,0xc00
        stw     r0,_TRAP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_enter
        /*
         * Restore argument registers possibly just changed.
         * We use the return value of do_syscall_trace_enter
         * for call number to look up in the table (r0).
         */
        mr      r0,r3
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        REST_NVGPRS(r1)
        b       syscall_dotrace_cont

syscall_exit_work:
        andi.   r0,r9,_TIF_RESTOREALL
        beq+    0f
        REST_NVGPRS(r1)
        b       2f
0:      cmplw   0,r3,r8
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)

1:      stw     r6,RESULT(r1)   /* Save result */
        stw     r3,GPR3(r1)     /* Update return value */
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set.  */

        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
3:      lwarx   r8,0,r12
        andc    r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
        dcbt    0,r12
#endif
        stwcx.  r8,0,r12
        bne-    3b
        subi    r12,r12,TI_FLAGS

4:      /* Anything which requires enabling interrupts? */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     ret_from_except

        /* Re-enable interrupts. There is no need to trace that with
         * lockdep as we are supposed to have IRQs on at this point
         */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)

        /* Save NVGPRS if they're not saved already */
        lwz     r4,_TRAP(r1)
        andi.   r4,r4,1
        beq     5f
        SAVE_NVGPRS(r1)
        li      r4,0xc00
        stw     r4,_TRAP(r1)
5:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_leave
        b       ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
        lis     r11,show_syscalls_task@ha
        lwz     r11,show_syscalls_task@l(r11)
        cmp     0,r2,r11
        bnelr
#endif
        stw     r31,GPR31(r1)
        mflr    r31
        lis     r3,7f@ha
        addi    r3,r3,7f@l
        lwz     r4,GPR0(r1)
        lwz     r5,GPR3(r1)
        lwz     r6,GPR4(r1)
        lwz     r7,GPR5(r1)
        lwz     r8,GPR6(r1)
        lwz     r9,GPR7(r1)
        bl      printk
        lis     r3,77f@ha
        addi    r3,r3,77f@l
        lwz     r4,GPR8(r1)
        mr      r5,r2
        bl      printk
        lwz     r0,GPR0(r1)
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        mtlr    r31
        lwz     r31,GPR31(r1)
        blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
        lis     r11,show_syscalls_task@ha
        lwz     r11,show_syscalls_task@l(r11)
        cmp     0,r2,r11
        bnelr
#endif
        stw     r31,GPR31(r1)
        mflr    r31
        stw     r3,RESULT(r1)   /* Save result */
        mr      r4,r3
        lis     r3,79f@ha
        addi    r3,r3,79f@l
        bl      printk
        lwz     r3,RESULT(r1)
        mtlr    r31
        lwz     r31,GPR31(r1)
        blr

7:      .string "syscall %d(%x, %x, %x, %x, %x, "
77:     .string "%x), current=%p\n"
79:     .string " -> %x\n"
        .align  2,0

#ifdef SHOW_SYSCALLS_TASK
        .data
        .globl  show_syscalls_task
show_syscalls_task:
        .long   -1
        .text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
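/*
 * The low bit of the _TRAP word doubles as a "partial frame" flag:
 * while it is set, only the volatile registers are live in the frame.
 * Each wrapper below does SAVE_NVGPRS and then clears that bit so
 * later code (e.g. the NVGPRS save in syscall_exit_work) knows the
 * full register set is already on the stack.
 */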
        .globl  ppc_fork
ppc_fork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_fork

        .globl  ppc_vfork
ppc_vfork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_vfork

        .globl  ppc_clone
ppc_clone:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_clone

        .globl  ppc_swapcontext
ppc_swapcontext:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
        .globl  handle_page_fault
handle_page_fault:
        stw     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
        cmpwi   r3,0
        beq+    ret_from_except
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        clrrwi  r0,r0,1
        stw     r0,_TRAP(r1)
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      bad_page_fault
        b       ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
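/*
 * Viewed from C this behaves roughly as
 *      struct task_struct *_switch(struct thread_struct *prev,
 *                                  struct thread_struct *next);
 * returning the task we switched away from ("mr r3,r2" below), which
 * the caller hands back as the 'last' task.
 */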
_GLOBAL(_switch)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        /* r3-r12 are caller saved -- Cort */
        SAVE_NVGPRS(r1)
        stw     r0,_NIP(r1)     /* Return to switch caller */
        mfmsr   r11
        li      r0,MSR_FP       /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h /* Disable altivec */
        mfspr   r12,SPRN_VRSAVE /* save vrsave register value */
        stw     r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_SPE@h  /* Disable SPE */
        mfspr   r12,SPRN_SPEFSCR /* save spefscr register value */
        stw     r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
        and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
        beq+    1f
        andc    r11,r11,r0
        MTMSRD(r11)
        isync
1:      stw     r11,_MSR(r1)
        mfcr    r10
        stw     r10,_CCR(r1)
        stw     r1,KSP(r3)      /* Set old stack pointer */

#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        tophys(r0,r4)
        CLR_TOP32(r0)
        mtspr   SPRN_SPRG_THREAD,r0     /* Update current THREAD phys addr */
        lwz     r1,KSP(r4)      /* Load new stack pointer */

        /* save the old current 'last' for return value */
        mr      r3,r2
        addi    r2,r4,-THREAD   /* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_VRSAVE(r2)
        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_SPEFSCR(r2)
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
        REST_NVGPRS(r1)

        lwz     r4,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r4
        addi    r1,r1,INT_FRAME_SIZE
        blr

        .globl  fast_exception_return
fast_exception_return:
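        /*
         * Callers (the exception prologs and the sleep/nap resume code
         * above) are expected to arrive with r11 = exception frame
         * (physical address), r9 = MSR to restore and r12 = NIP to
         * return to; only r1, r3-r6, r9-r12, CR and LR are reloaded
         * from the frame below.
         */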
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check for recoverable interrupt */
        beq     1f                      /* if not, we've got problems */
#endif

2:      REST_4GPRS(3, r11)
        lwz     r10,_CCR(r11)
        REST_GPR(1, r11)
        mtcr    r10
        lwz     r10,_LINK(r11)
        mtlr    r10
        REST_GPR(10, r11)
        mtspr   SPRN_SRR1,r9
        mtspr   SPRN_SRR0,r12
        REST_GPR(9, r11)
        REST_GPR(12, r11)
        lwz     r11,GPR11(r11)
        SYNC
        RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:      lis     r3,exc_exit_restart_end@ha
        addi    r3,r3,exc_exit_restart_end@l
        cmplw   r12,r3
        bge     3f
        lis     r4,exc_exit_restart@ha
        addi    r4,r4,exc_exit_restart@l
        cmplw   r12,r4
        blt     3f
        lis     r3,fee_restarts@ha
        tophys(r3,r3)
        lwz     r5,fee_restarts@l(r3)
        addi    r5,r5,1
        stw     r5,fee_restarts@l(r3)
        mr      r12,r4          /* restart at exc_exit_restart */
        b       2b

        .section .bss
        .align  2
fee_restarts:
        .space  4
        .previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
        b       2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
        li      r10,-1
        stw     r10,_TRAP(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r10,MSR_KERNEL@h
        ori     r10,r10,MSR_KERNEL@l
        bl      transfer_to_handler_full
        .long   nonrecoverable_exception
        .long   ret_from_except
#endif

        .globl  ret_from_except_full
ret_from_except_full:
        REST_NVGPRS(r1)
        /* fall through */

        .globl  ret_from_except
ret_from_except:
        /* Hard-disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt. */
        /* Note: We don't bother telling lockdep about it */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC                    /* Some chip revs have problems here... */
        MTMSRD(r10)             /* disable interrupts */

        lwz     r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r0,r3,MSR_PR
        beq     resume_kernel

user_exc_return:                /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_USER_WORK_MASK
        bne     do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* Check whether this process has its own DBCR0 value.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
        b       restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
        /* check current_thread_info->preempt_count */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r0,TI_PREEMPT(r9)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
        bne     restore
        lwz     r0,TI_FLAGS(r9)
        andi.   r0,r0,_TIF_NEED_RESCHED
        beq+    restore
        andi.   r0,r3,MSR_EE    /* interrupts off? */
        beq     restore         /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep thinks irqs are enabled, we need to call
         * preempt_schedule_irq with IRQs off, so we inform lockdep
         * now that we -did- turn them off already
         */
        bl      trace_hardirqs_off
#endif
1:      bl      preempt_schedule_irq
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r3,TI_FLAGS(r9)
        andi.   r0,r3,_TIF_NEED_RESCHED
        bne-    1b
#ifdef CONFIG_TRACE_IRQFLAGS
        /* And now, to properly rebalance the above, we tell lockdep they
         * are being turned back on, which will happen when we return
         */
        bl      trace_hardirqs_on
#endif
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

        /* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        beq+    1f
        li      r6,0
        iccci   r0,r0
        stw     r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

        lwz     r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep doesn't know that IRQs are temporarily turned off in
         * this assembly code while peeking at TI_FLAGS() and such. However
         * we need to inform it if the exception turned interrupts off, and
         * we are about to turn them back on.
         *
         * The problem here sadly is that we don't know whether the exception
         * was one that turned interrupts off or not. So we always tell
         * lockdep about turning them on here when we go back to wherever we
         * came from with EE on, even if that may mean some redundant calls
         * being tracked. Maybe later we could encode what the exception did
         * somewhere or test the exception type in the pt_regs, but that
         * sounds like overkill
         */
        andi.   r10,r9,MSR_EE
        beq     1f
        bl      trace_hardirqs_on
        lwz     r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        REST_4GPRS(3, r1)
        REST_2GPRS(7, r1)

        lwz     r10,_XER(r1)
        lwz     r11,_CTR(r1)
        mtspr   SPRN_XER,r10
        mtctr   r11

        PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
        lwarx   r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check if this exception occurred */
        beql    nonrecoverable          /* at a bad place (MSR:RI = 0) */

        lwz     r10,_CCR(r1)
        lwz     r11,_LINK(r1)
        mtcrf   0xFF,r10
        mtlr    r11

        /*
         * Once we put values in SRR0 and SRR1, we are in a state
         * where exceptions are not recoverable, since taking an
         * exception will trash SRR0 and SRR1.  Therefore we clear the
         * MSR:RI bit to indicate this.  If we do take an exception,
         * we can't return to the point of the exception but we
         * can restart the exception exit path at the label
         * exc_exit_restart below.  -- paulus
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
        SYNC
        MTMSRD(r10)             /* clear the RI bit */
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r12,_NIP(r1)
        FIX_SRR1(r9,r10)
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r9
        REST_4GPRS(9, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        SYNC
        RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
        /*
         * This is a bit different on 4xx/Book-E because it doesn't have
         * the RI bit in the MSR.
         * The TLB miss handler checks if we have interrupted
         * the exception exit path and restarts it if so
         * (well maybe one day it will... :).
         */
        lwz     r11,_LINK(r1)
        mtlr    r11
        lwz     r10,_CCR(r1)
        mtcrf   0xff,r10
        REST_2GPRS(9, r1)
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r11,_NIP(r1)
        lwz     r12,_MSR(r1)
exc_exit_start:
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        REST_2GPRS(11, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        PPC405_ERR77_SYNC
        rfi
        b       .                       /* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
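/*
 * Each Book-E exception level has its own SRR pair and return
 * instruction, which the code below mirrors:
 *   critical       CSRR0/CSRR1     rfci   (PPC_RFCI)
 *   debug          DSRR0/DSRR1     rfdi   (PPC_RFDI)
 *   machine check  MCSRR0/MCSRR1   rfmci  (PPC_RFMCI)
 * RET_FROM_EXC_LEVEL is parameterised on the pair so one body serves
 * all three exit paths.
 */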
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR                                             \
        /* avoid any possible TLB misses here by turning off MSR.DR, we     \
         * assume the instructions here are mapped by a pinned TLB entry */ \
        li      r10,MSR_IR;                                                 \
        mtmsr   r10;                                                        \
        isync;                                                              \
        tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

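/*
 * Common epilogue for the exception-level returns below: restores the
 * NVGPRs plus r0/r2-r12, XER, CTR, LR and CR from the frame, reloads
 * DEAR/ESR, moves the saved NIP and MSR into the level's own SRR
 * pair, and issues that level's return-from-interrupt instruction.
 * If the exception came from user mode it branches to the ordinary
 * user_exc_return path instead.
 */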
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)     \
        REST_NVGPRS(r1);                                                \
        lwz     r3,_MSR(r1);                                            \
        andi.   r3,r3,MSR_PR;                                           \
        LOAD_MSR_KERNEL(r10,MSR_KERNEL);                                \
        bne     user_exc_return;                                        \
        lwz     r0,GPR0(r1);                                            \
        lwz     r2,GPR2(r1);                                            \
        REST_4GPRS(3, r1);                                              \
        REST_2GPRS(7, r1);                                              \
        lwz     r10,_XER(r1);                                           \
        lwz     r11,_CTR(r1);                                           \
        mtspr   SPRN_XER,r10;                                           \
        mtctr   r11;                                                    \
        PPC405_ERR77(0,r1);                                             \
        stwcx.  r0,0,r1;                /* to clear the reservation */  \
        lwz     r11,_LINK(r1);                                          \
        mtlr    r11;                                                    \
        lwz     r10,_CCR(r1);                                           \
        mtcrf   0xff,r10;                                               \
        PPC_40x_TURN_OFF_MSR_DR;                                        \
        lwz     r9,_DEAR(r1);                                           \
        lwz     r10,_ESR(r1);                                           \
        mtspr   SPRN_DEAR,r9;                                           \
        mtspr   SPRN_ESR,r10;                                           \
        lwz     r11,_NIP(r1);                                           \
        lwz     r12,_MSR(r1);                                           \
        mtspr   exc_lvl_srr0,r11;                                       \
        mtspr   exc_lvl_srr1,r12;                                       \
        lwz     r9,GPR9(r1);                                            \
        lwz     r12,GPR12(r1);                                          \
        lwz     r10,GPR10(r1);                                          \
        lwz     r11,GPR11(r1);                                          \
        lwz     r1,GPR1(r1);                                            \
        PPC405_ERR77_SYNC;                                              \
        exc_lvl_rfi;                                                    \
        b       .;              /* prevent prefetch past exc_lvl_rfi */

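/*
 * RESTORE_xSRR relies on token pasting: RESTORE_xSRR(CSRR0,CSRR1),
 * for instance, expands to loads of _CSRR0/_CSRR1 from the frame
 * followed by mtspr to SPRN_CSRR0/SPRN_CSRR1, undoing the saves done
 * in crit_transfer_to_handler and friends above.
 */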
#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)                        \
        lwz     r9,_##exc_lvl_srr0(r1);                                 \
        lwz     r10,_##exc_lvl_srr1(r1);                                \
        mtspr   SPRN_##exc_lvl_srr0,r9;                                 \
        mtspr   SPRN_##exc_lvl_srr1,r10;

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7                                                    \
        lwz     r11,MAS7(r1);                                           \
        mtspr   SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS                                                \
        lwz     r9,MAS0(r1);                                            \
        lwz     r10,MAS1(r1);                                           \
        lwz     r11,MAS2(r1);                                           \
        mtspr   SPRN_MAS0,r9;                                           \
        lwz     r9,MAS3(r1);                                            \
        mtspr   SPRN_MAS1,r10;                                          \
        lwz     r10,MAS6(r1);                                           \
        mtspr   SPRN_MAS2,r11;                                          \
        mtspr   SPRN_MAS3,r9;                                           \
        mtspr   SPRN_MAS6,r10;                                          \
        RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS                                                \
        lwz     r9,MMUCR(r1);                                           \
        mtspr   SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lis     r10,saved_ksp_limit@ha;
        lwz     r10,saved_ksp_limit@l(r10);
        tovirt(r9,r9);
        stw     r10,KSP_LIMIT(r9)
        lis     r9,crit_srr0@ha;
        lwz     r9,crit_srr0@l(r9);
        lis     r10,crit_srr1@ha;
        lwz     r10,crit_srr1@l(r10);
        mtspr   SPRN_SRR0,r9;
        mtspr   SPRN_SRR1,r10;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

        .globl  ret_from_debug_exc
ret_from_debug_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        lwz     r9,THREAD_INFO-THREAD(r9)
        rlwinm  r10,r1,0,0,(31-THREAD_SHIFT)
        lwz     r10,TI_PREEMPT(r10)
        stw     r10,TI_PREEMPT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

        .globl  ret_from_mcheck_exc
ret_from_mcheck_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_xSRR(DSRR0,DSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that r0
 * has the dbcr0 value to set upon entry to this.
 */
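/*
 * global_dbcr0 (below) holds two words per CPU: word 0 is the DBCR0
 * value saved here, word 1 a use count that load_dbcr0 increments and
 * the matching reload in transfer_to_handler decrements.
 */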
load_dbcr0:
        mfmsr   r10             /* first disable debug exceptions */
        rlwinm  r10,r10,0,~MSR_DE
        mtmsr   r10
        isync
        mfspr   r10,SPRN_DBCR0
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_CPU(r9)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        stw     r10,0(r11)
        mtspr   SPRN_DBCR0,r0
        lwz     r10,4(r11)
        addi    r10,r10,1
        stw     r10,4(r11)
        li      r11,-1
        mtspr   SPRN_DBSR,r11   /* clear all pending debug events */
        blr

        .section .bss
        .align  4
global_dbcr0:
        .space  8*NR_CPUS
        .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:                        /* r10 contains MSR_KERNEL here */
        andi.   r0,r9,_TIF_NEED_RESCHED
        beq     do_user_signal

do_resched:                     /* r10 contains MSR_KERNEL here */
        /* Note: We don't need to inform lockdep that we are enabling
         * interrupts here. As far as it knows, they are already enabled
         */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        bl      schedule
recheck:
        /* Note: And we don't tell it we are disabling them again
         * either. Those disable/enable cycles used to peek at
         * TI_FLAGS aren't advertised.
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)             /* disable interrupts */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
        andi.   r0,r9,_TIF_USER_WORK_MASK
        beq     restore_user
do_user_signal:                 /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        /* save r13-r31 in the exception frame, if not already done */
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     2f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
2:      addi    r3,r1,STACK_FRAME_OVERHEAD
        mr      r4,r9
        bl      do_signal
        REST_NVGPRS(r1)
        b       recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
        lis     r10,exc_exit_restart_end@ha
        addi    r10,r10,exc_exit_restart_end@l
        cmplw   r12,r10
        bge     3f
        lis     r11,exc_exit_restart@ha
        addi    r11,r11,exc_exit_restart@l
        cmplw   r12,r11
        blt     3f
        lis     r10,ee_restarts@ha
        lwz     r12,ee_restarts@l(r10)
        addi    r12,r12,1
        stw     r12,ee_restarts@l(r10)
        mr      r12,r11         /* restart at exc_exit_restart */
        blr
3:      /* OK, we can't recover, kill this process */
        /* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
        blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     4f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
4:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      nonrecoverable_exception
        /* shouldn't return */
        b       4b

        .section .bss
        .align  2
ee_restarts:
        .space  4
        .previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
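/*
 * The caller (in rtas.c) is expected to pass the physical address of
 * the RTAS argument buffer in r3, which goes through to RTAS
 * untouched.  We stash the physical kernel stack pointer in SPRG_RTAS
 * for the machine check handler, drop to real mode (MSR_IR/MSR_DR
 * cleared) for the call, and RTAS returns to the 1: label below,
 * which restores the original MSR via RFI.
 */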
_GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
        tophys(r7,r1)
        lwz     r8,RTASENTRY(r4)
        lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
        LOAD_MSR_KERNEL(r0,MSR_KERNEL)
        SYNC                    /* disable interrupts so SRR0/1 */
        MTMSRD(r0)              /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
        mtlr    r6
        mtspr   SPRN_SPRG_RTAS,r7
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI
1:      tophys(r9,r1)
        lwz     r8,INT_FRAME_SIZE+4(r9) /* get return address */
        lwz     r9,8(r9)        /* original msr value */
        FIX_SRR1(r9,r0)
        addi    r1,r1,INT_FRAME_SIZE
        li      r0,0
        mtspr   SPRN_SPRG_RTAS,r0
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI                     /* return to caller */

        .globl  machine_check_in_rtas
machine_check_in_rtas:
        twi     31,0,0
        /* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
        /*
         * _mcount on PPC32 is required to preserve the link register.
         * But we have r0 to play with. We use r0 to push the return
         * address back to the caller of mcount into the ctr register,
         * restore the link register and then jump back using the ctr
         * register.
         */
        mflr    r0
        mtctr   r0
        lwz     r0, 4(r1)
        mtlr    r0
        bctr

_GLOBAL(ftrace_caller)
        MCOUNT_SAVE_FRAME
        /* r3 ends up with link register */
        subi    r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
        bl      ftrace_stub
        nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        b       ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
        MCOUNT_RESTORE_FRAME
        /* old link register ends up in ctr reg */
        bctr
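/*
 * The "bl ftrace_stub" at ftrace_call and the branch at
 * ftrace_graph_call are patch sites: with CONFIG_DYNAMIC_FTRACE the
 * kernel rewrites them at runtime to call the active tracer, which is
 * why they must stay at these exported labels.
 */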
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

        MCOUNT_SAVE_FRAME

        subi    r3, r3, MCOUNT_INSN_SIZE
        LOAD_REG_ADDR(r5, ftrace_trace_function)
        lwz     r5,0(r5)

        mtctr   r5
        bctrl
        nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        b       ftrace_graph_caller
#endif
        MCOUNT_RESTORE_FRAME
        bctr
#endif

_GLOBAL(ftrace_stub)
        blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
        /* load r4 with local address */
        lwz     r4, 44(r1)
        subi    r4, r4, MCOUNT_INSN_SIZE

        /* get the parent address */
        addi    r3, r1, 52

        bl      prepare_ftrace_return
        nop

        MCOUNT_RESTORE_FRAME
        /* old link register ends up in ctr reg */
        bctr

_GLOBAL(return_to_handler)
        /* need to save return values */
        stwu    r1, -32(r1)
        stw     r3, 20(r1)
        stw     r4, 16(r1)
        stw     r31, 12(r1)
        mr      r31, r1

        bl      ftrace_return_to_handler
        nop

        /* return value has real return address */
        mtlr    r3

        lwz     r3, 20(r1)
        lwz     r4, 16(r1)
        lwz     r31,12(r1)
        lwz     r1, 0(r1)

        /* Jump back to real return address */
        blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
#endif /* CONFIG_FUNCTION_TRACER */