linux/arch/powerpc/kernel/entry_32.S
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>
#include <asm/ftrace.h>
#include <asm/ptrace.h>
#include <asm/export.h>

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)   lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)   li r,(x)
#endif
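/*
 * For illustration: with MSR_KERNEL above 0x10000,
 * LOAD_MSR_KERNEL(r10, MSR_KERNEL) expands to
 *      lis     r10,(MSR_KERNEL)@h
 *      ori     r10,r10,(MSR_KERNEL)@l
 * because a constant that wide no longer fits the signed 16-bit
 * immediate of a single li.
 */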

#ifdef CONFIG_BOOKE
        .globl  mcheck_transfer_to_handler
mcheck_transfer_to_handler:
        mfspr   r0,SPRN_DSRR0
        stw     r0,_DSRR0(r11)
        mfspr   r0,SPRN_DSRR1
        stw     r0,_DSRR1(r11)
        /* fall through */

        .globl  debug_transfer_to_handler
debug_transfer_to_handler:
        mfspr   r0,SPRN_CSRR0
        stw     r0,_CSRR0(r11)
        mfspr   r0,SPRN_CSRR1
        stw     r0,_CSRR1(r11)
        /* fall through */

        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
#ifdef CONFIG_PPC_BOOK3E_MMU
        mfspr   r0,SPRN_MAS0
        stw     r0,MAS0(r11)
        mfspr   r0,SPRN_MAS1
        stw     r0,MAS1(r11)
        mfspr   r0,SPRN_MAS2
        stw     r0,MAS2(r11)
        mfspr   r0,SPRN_MAS3
        stw     r0,MAS3(r11)
        mfspr   r0,SPRN_MAS6
        stw     r0,MAS6(r11)
#ifdef CONFIG_PHYS_64BIT
        mfspr   r0,SPRN_MAS7
        stw     r0,MAS7(r11)
#endif /* CONFIG_PHYS_64BIT */
#endif /* CONFIG_PPC_BOOK3E_MMU */
#ifdef CONFIG_44x
        mfspr   r0,SPRN_MMUCR
        stw     r0,MMUCR(r11)
#endif
        mfspr   r0,SPRN_SRR0
        stw     r0,_SRR0(r11)
        mfspr   r0,SPRN_SRR1
        stw     r0,_SRR1(r11)

        /* set the stack limit to the base of the current stack,
         * so that the limit protects the thread_info struct
         * kept there
         */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,SAVED_KSP_LIMIT(r11)
        rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
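        /*
         * For illustration: rlwimi r0,r1,0,0,(31-THREAD_SHIFT) replaces
         * the high bits of r0 with those of r1 while keeping the low
         * THREAD_SHIFT bits of the old limit.  Assuming THREAD_SHIFT = 13
         * and r1 = 0xc5003e40, the limit is rebased onto 0xc5002000,
         * the bottom of the current stack, where thread_info lives.
         */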
        /* fall through */
#endif

#ifdef CONFIG_40x
        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
        lwz     r0,crit_r10@l(0)
        stw     r0,GPR10(r11)
        lwz     r0,crit_r11@l(0)
        stw     r0,GPR11(r11)
        mfspr   r0,SPRN_SRR0
        stw     r0,crit_srr0@l(0)
        mfspr   r0,SPRN_SRR1
        stw     r0,crit_srr1@l(0)

        /* set the stack limit to the base of the current stack,
         * so that the limit protects the thread_info struct
         * kept there
         */
        mfspr   r8,SPRN_SPRG_THREAD
        lwz     r0,KSP_LIMIT(r8)
        stw     r0,saved_ksp_limit@l(0)
        rlwimi  r0,r1,0,0,(31-THREAD_SHIFT)
        stw     r0,KSP_LIMIT(r8)
        /* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
        .globl  transfer_to_handler_full
transfer_to_handler_full:
        SAVE_NVGPRS(r11)
        /* fall through */

        .globl  transfer_to_handler
transfer_to_handler:
        stw     r2,GPR2(r11)
        stw     r12,_NIP(r11)
        stw     r9,_MSR(r11)
        andi.   r2,r9,MSR_PR
        mfctr   r12
        mfspr   r2,SPRN_XER
        stw     r12,_CTR(r11)
        stw     r2,_XER(r11)
        mfspr   r12,SPRN_SPRG_THREAD
        addi    r2,r12,-THREAD
        tovirt(r2,r2)                   /* set r2 to current */
        beq     2f                      /* if from user, fix up THREAD.regs */
        addi    r11,r1,STACK_FRAME_OVERHEAD
        stw     r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        /* Check to see if the dbcr0 register is set up to debug.  Use the
           internal debug mode bit to do this. */
        lwz     r12,THREAD_DBCR0(r12)
        andis.  r12,r12,DBCR0_IDM@h
        beq+    3f
        /* From user and task is ptraced - load up global dbcr0 */
        li      r12,-1                  /* clear all pending debug events */
        mtspr   SPRN_DBSR,r12
        lis     r11,global_dbcr0@ha
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_CPU(r9)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        lwz     r12,0(r11)
        mtspr   SPRN_DBCR0,r12
        lwz     r12,4(r11)
        addi    r12,r12,-1
        stw     r12,4(r11)
#endif
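        /*
         * For illustration: global_dbcr0 (defined in .bss below as
         * .space 8*NR_CPUS) is an array of 8-byte per-CPU slots; word 0
         * holds the saved DBCR0 value and word 4 a use count, so
         * slwi r9,r9,3 turns the CPU number into the slot offset.
         */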
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        CURRENT_THREAD_INFO(r9, r1)
        tophys(r9, r9)
        ACCOUNT_CPU_USER_ENTRY(r9, r11, r12)
#endif

        b       3f

2:      /* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
        lwz     r9,KSP_LIMIT(r12)
        cmplw   r1,r9                   /* if r1 <= ksp_limit */
        ble-    stack_ovf               /* then the kernel stack overflowed */
5:
#if defined(CONFIG_6xx) || defined(CONFIG_E500)
        CURRENT_THREAD_INFO(r9, r1)
        tophys(r9,r9)                   /* check local flags */
        lwz     r12,TI_LOCAL_FLAGS(r9)
        mtcrf   0x01,r12
        bt-     31-TLF_NAPPING,4f
        bt-     31-TLF_SLEEPING,7f
#endif /* CONFIG_6xx || CONFIG_E500 */
        .globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
        mflr    r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
#ifdef CONFIG_PPC_8xx_PERF_EVENT
        mtspr   SPRN_NRI, r0
#endif
#ifdef CONFIG_TRACE_IRQFLAGS
        lis     r12,reenable_mmu@h
        ori     r12,r12,reenable_mmu@l
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI
reenable_mmu:                           /* re-enable mmu so we can */
        mfmsr   r10
        lwz     r12,_MSR(r1)
        xor     r10,r10,r12
        andi.   r10,r10,MSR_EE          /* Did EE change? */
        beq     1f

        /*
         * trace_hardirqs_off will use CALLER_ADDR0 and CALLER_ADDR1.
         * If we came from user mode there is only one stack frame on the
         * stack, and accessing CALLER_ADDR1 will cause an oops, so we
         * need to create a dummy stack frame to keep trace_hardirqs_off
         * happy.
         *
         * This is handy because we also need to save a bunch of GPRs:
         * r3 can be different from GPR3(r1) at this point, r9 and r11
         * contain the old MSR and the handler address respectively, and
         * r4 & r5 can contain page fault arguments that need to be
         * passed along as well. r12, CCR, CTR, XER etc... are left
         * clobbered as they aren't useful past this point (they aren't
         * syscall arguments); the rest is restored from the exception
         * frame.
         */
        stwu    r1,-32(r1)
        stw     r9,8(r1)
        stw     r11,12(r1)
        stw     r3,16(r1)
        stw     r4,20(r1)
        stw     r5,24(r1)
        bl      trace_hardirqs_off
        lwz     r5,24(r1)
        lwz     r4,20(r1)
        lwz     r3,16(r1)
        lwz     r11,12(r1)
        lwz     r9,8(r1)
        addi    r1,r1,32
        lwz     r0,GPR0(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
1:      mtctr   r11
        mtlr    r9
        bctr                            /* jump to handler */
#else /* CONFIG_TRACE_IRQFLAGS */
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */
#endif /* CONFIG_TRACE_IRQFLAGS */

#if defined(CONFIG_6xx) || defined(CONFIG_E500)
4:      rlwinm  r12,r12,0,~_TLF_NAPPING
        stw     r12,TI_LOCAL_FLAGS(r9)
        b       power_save_ppc32_restore

7:      rlwinm  r12,r12,0,~_TLF_SLEEPING
        stw     r12,TI_LOCAL_FLAGS(r9)
        lwz     r9,_MSR(r11)            /* if sleeping, clear MSR.EE */
        rlwinm  r9,r9,0,~MSR_EE
        lwz     r12,_LINK(r11)          /* and return to address in LR */
        b       fast_exception_return
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
        lis     r12,_end@h
        ori     r12,r12,_end@l
        cmplw   r1,r12
        ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
        addi    r1,r1,init_thread_union@l
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        lis     r9,StackOverflow@ha
        addi    r9,r9,StackOverflow@l
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
        mtspr   SPRN_NRI, r0
#endif
        mtspr   SPRN_SRR0,r9
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI

/*
 * Handle a system call.
 */
        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
        .stabs  "entry_32.S",N_SO,0,0,0f
0:

_GLOBAL(DoSyscall)
        stw     r3,ORIG_GPR3(r1)
        li      r12,0
        stw     r12,RESULT(r1)
        lwz     r11,_CCR(r1)    /* Clear SO bit in CR */
        rlwinm  r11,r11,0,4,2
        stw     r11,_CCR(r1)
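        /*
         * For illustration: the wrapping mask 4..2 in rlwinm clears only
         * bit 3 of the saved CR image, i.e. CR0[SO] in big-endian bit
         * numbering; the exit path sets the same bit with
         * oris r11,r11,0x1000 to signal an error return.
         */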
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Return from syscalls can (and generally will) hard enable
         * interrupts. You aren't supposed to call a syscall with
         * interrupts disabled in the first place. However, to ensure
         * that we get it right vs. lockdep if it happens, we force
         * that hard enable here with appropriate tracing if we see
         * that we have been called with interrupts off.
         */
        mfmsr   r11
        andi.   r12,r11,MSR_EE
        bne+    1f
        /* We came in with interrupts disabled, we enable them now */
        bl      trace_hardirqs_on
        mfmsr   r11
        lwz     r0,GPR0(r1)
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        ori     r11,r11,MSR_EE
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        mtmsr   r11
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
        CURRENT_THREAD_INFO(r10, r1)
        lwz     r11,TI_FLAGS(r10)
        andi.   r11,r11,_TIF_SYSCALL_DOTRACE
        bne-    syscall_dotrace
syscall_dotrace_cont:
        cmplwi  0,r0,NR_syscalls
        lis     r10,sys_call_table@h
        ori     r10,r10,sys_call_table@l
        slwi    r0,r0,2
        bge-    66f
        lwzx    r10,r10,r0      /* Fetch system call handler [ptr] */
        mtlr    r10
        addi    r9,r1,STACK_FRAME_OVERHEAD
        PPC440EP_ERR42
        blrl                    /* Call handler */
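        /*
         * For illustration: r0 carries the syscall number; slwi r0,r0,2
         * scales it by the 4-byte size of a sys_call_table entry, and
         * lwzx loads the handler pointer from sys_call_table + 4*r0.
         * Numbers >= NR_syscalls were already diverted to the -ENOSYS
         * return at 66: below.
         */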
        .globl  ret_from_syscall
ret_from_syscall:
        mr      r6,r3
        CURRENT_THREAD_INFO(r12, r1)
        /* disable interrupts so current_thread_info()->flags can't change */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
        /* Note: We don't bother telling lockdep about it */
        SYNC
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r12)
        li      r8,-MAX_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
        cmplw   0,r3,r8
        blt+    syscall_exit_cont
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)
syscall_exit_cont:
        lwz     r8,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* If we are going to return from the syscall with interrupts
         * off, we trace that here. It shouldn't happen, but we want
         * to catch the bugger if it does, right?
         */
        andi.   r10,r8,MSR_EE
        bne+    1f
        stw     r3,GPR3(r1)
        bl      trace_hardirqs_off
        lwz     r3,GPR3(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* If the process has its own DBCR0 value, load it up.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        bne-    2f
1:
END_MMU_FTR_SECTION_IFCLR(MMU_FTR_TYPE_47x)
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
        lwarx   r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
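        /*
         * For illustration: the stwcx. to the stack kills any
         * outstanding lwarx reservation the interrupted context may have
         * held, so an lwarx/stwcx. pair in userspace cannot succeed
         * spuriously after the syscall; CPUs with
         * CPU_FTR_NEED_PAIRED_STWCX want the stwcx. preceded by a
         * matching lwarx, hence the feature section above.
         */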
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        andi.   r4,r8,MSR_PR
        beq     3f
        CURRENT_THREAD_INFO(r4, r1)
        ACCOUNT_CPU_USER_EXIT(r4, r5, r7)
3:
#endif
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
        mtlr    r4
        mtcr    r5
        lwz     r7,_NIP(r1)
        lwz     r2,GPR2(r1)
        lwz     r1,GPR1(r1)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
        mtspr   SPRN_NRI, r0
#endif
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        SYNC
        RFI
#ifdef CONFIG_44x
2:      li      r7,0
        iccci   r0,r0
        stw     r7,icache_44x_need_flush@l(r4)
        b       1b
#endif  /* CONFIG_44x */

66:     li      r3,-ENOSYS
        b       ret_from_syscall

        .globl  ret_from_fork
ret_from_fork:
        REST_NVGPRS(r1)
        bl      schedule_tail
        li      r3,0
        b       ret_from_syscall

        .globl  ret_from_kernel_thread
ret_from_kernel_thread:
        REST_NVGPRS(r1)
        bl      schedule_tail
        mtlr    r14
        mr      r3,r15
        PPC440EP_ERR42
        blrl
        li      r3,0
        b       ret_from_syscall

/* Traced system call support */
syscall_dotrace:
        SAVE_NVGPRS(r1)
        li      r0,0xc00
        stw     r0,_TRAP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_enter
        /*
         * Restore argument registers possibly just changed.
         * We use the return value of do_syscall_trace_enter
         * for call number to look up in the table (r0).
         */
        mr      r0,r3
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        REST_NVGPRS(r1)

        cmplwi  r0,NR_syscalls
        /* Return code is already in r3 thanks to do_syscall_trace_enter() */
        bge-    ret_from_syscall
        b       syscall_dotrace_cont

syscall_exit_work:
        andi.   r0,r9,_TIF_RESTOREALL
        beq+    0f
        REST_NVGPRS(r1)
        b       2f
0:      cmplw   0,r3,r8
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)

1:      stw     r6,RESULT(r1)   /* Save result */
        stw     r3,GPR3(r1)     /* Update return value */
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set.  */

        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
3:      lwarx   r8,0,r12
        andc    r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
        dcbt    0,r12
#endif
        stwcx.  r8,0,r12
        bne-    3b
        subi    r12,r12,TI_FLAGS
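        /*
         * For illustration: the 3: lwarx/andc/stwcx. loop is an atomic
         * read-modify-write that clears the per-syscall TIF bits without
         * losing concurrent updates to thread_info->flags; the dcbt under
         * CONFIG_IBM405_ERR77 is the 405 erratum #77 workaround that
         * must precede stwcx. on those cores.
         */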

4:      /* Anything which requires enabling interrupts? */
        andi.   r0,r9,(_TIF_SYSCALL_DOTRACE|_TIF_SINGLESTEP)
        beq     ret_from_except

        /* Re-enable interrupts. There is no need to trace that with
         * lockdep as we are supposed to have IRQs on at this point
         */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)

        /* Save NVGPRS if they're not saved already */
        lwz     r4,_TRAP(r1)
        andi.   r4,r4,1
        beq     5f
        SAVE_NVGPRS(r1)
        li      r4,0xc00
        stw     r4,_TRAP(r1)
5:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_leave
        b       ret_from_except_full

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
        .globl  ppc_fork
ppc_fork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_fork
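/*
 * For illustration: bit 31 (the LSB) of the saved _TRAP word doubles as
 * a "partial register set" marker.  Clearing it with rlwinm r0,r0,0,0,30
 * tells later code that checks it (e.g. the andi. ...,1 tests in
 * syscall_exit_work and do_user_signal) that r13-r31 are already in the
 * exception frame.
 */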

        .globl  ppc_vfork
ppc_vfork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_vfork

        .globl  ppc_clone
ppc_clone:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_clone

        .globl  ppc_swapcontext
ppc_swapcontext:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
        .globl  handle_page_fault
handle_page_fault:
        stw     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
        cmpwi   r3,0
        beq+    ret_from_except
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        clrrwi  r0,r0,1
        stw     r0,_TRAP(r1)
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      bad_page_fault
        b       ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this code, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        /* r3-r12 are caller saved -- Cort */
        SAVE_NVGPRS(r1)
        stw     r0,_NIP(r1)     /* Return to switch caller */
        mfmsr   r11
        li      r0,MSR_FP       /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h /* Disable altivec */
        mfspr   r12,SPRN_VRSAVE /* save vrsave register value */
        stw     r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_SPE@h  /* Disable SPE */
        mfspr   r12,SPRN_SPEFSCR /* save spefscr register value */
        stw     r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
        and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
        beq+    1f
        andc    r11,r11,r0
        MTMSRD(r11)
        isync
1:      stw     r11,_MSR(r1)
        mfcr    r10
        stw     r10,_CCR(r1)
        stw     r1,KSP(r3)      /* Set old stack pointer */

#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        tophys(r0,r4)
        mtspr   SPRN_SPRG_THREAD,r0     /* Update current THREAD phys addr */
        lwz     r1,KSP(r4)      /* Load new stack pointer */

        /* save the old current 'last' for return value */
        mr      r3,r2
        addi    r2,r4,-THREAD   /* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_VRSAVE(r2)
        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_SPEFSCR(r2)
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
        REST_NVGPRS(r1)

        lwz     r4,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r4
        addi    r1,r1,INT_FRAME_SIZE
        blr
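/*
 * For illustration: once the stack pointers are swapped, the blr above
 * returns via the _NIP saved on the NEW task's stack, so execution
 * resumes in the new task's context; r3 carries the old 'current' back
 * to the C caller as the 'last' value of switch_to().
 */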

        .globl  fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check for recoverable interrupt */
        beq     1f                      /* if not, we've got problems */
#endif

2:      REST_4GPRS(3, r11)
        lwz     r10,_CCR(r11)
        REST_GPR(1, r11)
        mtcr    r10
        lwz     r10,_LINK(r11)
        mtlr    r10
        REST_GPR(10, r11)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
        mtspr   SPRN_NRI, r0
#endif
        mtspr   SPRN_SRR1,r9
        mtspr   SPRN_SRR0,r12
        REST_GPR(9, r11)
        REST_GPR(12, r11)
        lwz     r11,GPR11(r11)
        SYNC
        RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:      lis     r3,exc_exit_restart_end@ha
        addi    r3,r3,exc_exit_restart_end@l
        cmplw   r12,r3
        bge     3f
        lis     r4,exc_exit_restart@ha
        addi    r4,r4,exc_exit_restart@l
        cmplw   r12,r4
        blt     3f
        lis     r3,fee_restarts@ha
        tophys(r3,r3)
        lwz     r5,fee_restarts@l(r3)
        addi    r5,r5,1
        stw     r5,fee_restarts@l(r3)
        mr      r12,r4          /* restart at exc_exit_restart */
        b       2b

        .section .bss
        .align  2
fee_restarts:
        .space  4
        .previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
        b       2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
        li      r10,-1
        stw     r10,_TRAP(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r10,MSR_KERNEL@h
        ori     r10,r10,MSR_KERNEL@l
        bl      transfer_to_handler_full
        .long   nonrecoverable_exception
        .long   ret_from_except
#endif

        .globl  ret_from_except_full
ret_from_except_full:
        REST_NVGPRS(r1)
        /* fall through */

        .globl  ret_from_except
ret_from_except:
        /* Hard-disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt. */
        /* Note: We don't bother telling lockdep about it */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC                    /* Some chip revs have problems here... */
        MTMSRD(r10)             /* disable interrupts */

        lwz     r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r0,r3,MSR_PR
        beq     resume_kernel

user_exc_return:                /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_USER_WORK_MASK
        bne     do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* Check whether this process has its own DBCR0 value.  The internal
           debug mode bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IDM@h
        bnel-   load_dbcr0
#endif
#ifdef CONFIG_VIRT_CPU_ACCOUNTING_NATIVE
        CURRENT_THREAD_INFO(r9, r1)
        ACCOUNT_CPU_USER_EXIT(r9, r10, r11)
#endif

        b       restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r8,TI_FLAGS(r9)
        andis.  r0,r8,_TIF_EMULATE_STACK_STORE@h
        beq+    1f

        addi    r8,r1,INT_FRAME_SIZE    /* Get the kprobed function entry */

        lwz     r3,GPR1(r1)
        subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception frame */
        mr      r4,r1                   /* src:  current exception frame */
        mr      r1,r3                   /* Reroute the trampoline frame to r1 */

        /* Copy from the original to the trampoline. */
        li      r5,INT_FRAME_SIZE/4     /* size: INT_FRAME_SIZE */
        li      r6,0                    /* start offset: 0 */
        mtctr   r5
2:      lwzx    r0,r6,r4
        stwx    r0,r6,r3
        addi    r6,r6,4
        bdnz    2b

        /* Do real store operation to complete stwu */
        lwz     r5,GPR1(r1)
        stw     r8,0(r5)
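        /*
         * For illustration: _TIF_EMULATE_STACK_STORE means an stwu
         * updating r1 was emulated (e.g. for a kprobe) without its
         * store being performed.  The exception frame is first copied
         * INT_FRAME_SIZE below itself and r1 rerouted to the copy, so
         * the back-chain word written just above can never land inside
         * the frame we are still running on.
         */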

        /* Clear _TIF_EMULATE_STACK_STORE flag */
        lis     r11,_TIF_EMULATE_STACK_STORE@h
        addi    r5,r9,TI_FLAGS
0:      lwarx   r8,0,r5
        andc    r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
        dcbt    0,r5
#endif
        stwcx.  r8,0,r5
        bne-    0b
1:

#ifdef CONFIG_PREEMPT
        /* check current_thread_info->preempt_count */
        lwz     r0,TI_PREEMPT(r9)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
        bne     restore
        andi.   r8,r8,_TIF_NEED_RESCHED
        beq+    restore
        lwz     r3,_MSR(r1)
        andi.   r0,r3,MSR_EE    /* interrupts off? */
        beq     restore         /* don't schedule if so */
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep thinks irqs are enabled, we need to call
         * preempt_schedule_irq with IRQs off, so we inform lockdep
         * now that we -did- turn them off already
         */
        bl      trace_hardirqs_off
#endif
1:      bl      preempt_schedule_irq
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r3,TI_FLAGS(r9)
        andi.   r0,r3,_TIF_NEED_RESCHED
        bne-    1b
#ifdef CONFIG_TRACE_IRQFLAGS
        /* And now, to properly rebalance the above, we tell lockdep they
         * are being turned back on, which will happen when we return
         */
        bl      trace_hardirqs_on
#endif
#endif /* CONFIG_PREEMPT */

        /* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
BEGIN_MMU_FTR_SECTION
        b       1f
END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_47x)
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        beq+    1f
        li      r6,0
        iccci   r0,r0
        stw     r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */

        lwz     r9,_MSR(r1)
#ifdef CONFIG_TRACE_IRQFLAGS
        /* Lockdep doesn't know that IRQs are temporarily turned off in
         * this assembly code while peeking at TI_FLAGS() and such. However
         * we need to inform it if the exception turned interrupts off, and
         * we are about to turn them back on.
         *
         * The problem, sadly, is that we don't know whether the exception
         * was one that turned interrupts off or not. So we always tell
         * lockdep about turning them on here when we go back to wherever
         * we came from with EE on, even if that may mean some redundant
         * calls being tracked. Maybe later we could encode what the
         * exception did somewhere or test the exception type in the
         * pt_regs, but that sounds like overkill
         */
        andi.   r10,r9,MSR_EE
        beq     1f
        /*
         * Since the ftrace irqsoff latency trace checks CALLER_ADDR1,
         * which is the stack frame here, we need to force a stack frame
         * in case we came from user space.
         */
        stwu    r1,-32(r1)
        mflr    r0
        stw     r0,4(r1)
        stwu    r1,-32(r1)
        bl      trace_hardirqs_on
        lwz     r1,0(r1)
        lwz     r1,0(r1)
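        /*
         * For illustration: the two stwu instructions above pushed two
         * 32-byte frames (the first holding a saved LR at offset 4 so
         * CALLER_ADDR1 is valid); each lwz r1,0(r1) pops one frame by
         * following the back chain.
         */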
        lwz     r9,_MSR(r1)
1:
#endif /* CONFIG_TRACE_IRQFLAGS */

        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        REST_4GPRS(3, r1)
        REST_2GPRS(7, r1)

        lwz     r10,_XER(r1)
        lwz     r11,_CTR(r1)
        mtspr   SPRN_XER,r10
        mtctr   r11

        PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
        lwarx   r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check if this exception occurred */
        beql    nonrecoverable          /* at a bad place (MSR:RI = 0) */

        lwz     r10,_CCR(r1)
        lwz     r11,_LINK(r1)
        mtcrf   0xFF,r10
        mtlr    r11

        /*
         * Once we put values in SRR0 and SRR1, we are in a state
         * where exceptions are not recoverable, since taking an
         * exception will trash SRR0 and SRR1.  Therefore we clear the
         * MSR:RI bit to indicate this.  If we do take an exception,
         * we can't return to the point of the exception but we
         * can restart the exception exit path at the label
         * exc_exit_restart below.  -- paulus
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
        SYNC
        MTMSRD(r10)             /* clear the RI bit */
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r12,_NIP(r1)
#ifdef CONFIG_PPC_8xx_PERF_EVENT
        mtspr   SPRN_NRI, r0
#endif
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r9
        REST_4GPRS(9, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        SYNC
        RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
        /*
         * This is a bit different on 4xx/Book-E because it doesn't have
         * the RI bit in the MSR.
         * The TLB miss handler checks if we have interrupted
         * the exception exit path and restarts it if so
         * (well maybe one day it will... :).
         */
        lwz     r11,_LINK(r1)
        mtlr    r11
        lwz     r10,_CCR(r1)
        mtcrf   0xff,r10
        REST_2GPRS(9, r1)
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r11,_NIP(r1)
        lwz     r12,_MSR(r1)
exc_exit_start:
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        REST_2GPRS(11, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        PPC405_ERR77_SYNC
        rfi
        b       .                       /* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR                                             \
        /* avoid any possible TLB misses here by turning off MSR.DR, we     \
         * assume the instructions here are mapped by a pinned TLB entry */ \
        li      r10,MSR_IR;                                                 \
        mtmsr   r10;                                                        \
        isync;                                                              \
        tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)     \
        REST_NVGPRS(r1);                                                \
        lwz     r3,_MSR(r1);                                            \
        andi.   r3,r3,MSR_PR;                                           \
        LOAD_MSR_KERNEL(r10,MSR_KERNEL);                                \
        bne     user_exc_return;                                        \
        lwz     r0,GPR0(r1);                                            \
        lwz     r2,GPR2(r1);                                            \
        REST_4GPRS(3, r1);                                              \
        REST_2GPRS(7, r1);                                              \
        lwz     r10,_XER(r1);                                           \
        lwz     r11,_CTR(r1);                                           \
        mtspr   SPRN_XER,r10;                                           \
        mtctr   r11;                                                    \
        PPC405_ERR77(0,r1);                                             \
        stwcx.  r0,0,r1;                /* to clear the reservation */  \
        lwz     r11,_LINK(r1);                                          \
        mtlr    r11;                                                    \
        lwz     r10,_CCR(r1);                                           \
        mtcrf   0xff,r10;                                               \
        PPC_40x_TURN_OFF_MSR_DR;                                        \
        lwz     r9,_DEAR(r1);                                           \
        lwz     r10,_ESR(r1);                                           \
        mtspr   SPRN_DEAR,r9;                                           \
        mtspr   SPRN_ESR,r10;                                           \
        lwz     r11,_NIP(r1);                                           \
        lwz     r12,_MSR(r1);                                           \
        mtspr   exc_lvl_srr0,r11;                                       \
        mtspr   exc_lvl_srr1,r12;                                       \
        lwz     r9,GPR9(r1);                                            \
        lwz     r12,GPR12(r1);                                          \
        lwz     r10,GPR10(r1);                                          \
        lwz     r11,GPR11(r1);                                          \
        lwz     r1,GPR1(r1);                                            \
        PPC405_ERR77_SYNC;                                              \
        exc_lvl_rfi;                                                    \
        b       .;              /* prevent prefetch past exc_lvl_rfi */

#define RESTORE_xSRR(exc_lvl_srr0, exc_lvl_srr1)                        \
        lwz     r9,_##exc_lvl_srr0(r1);                                 \
        lwz     r10,_##exc_lvl_srr1(r1);                                \
        mtspr   SPRN_##exc_lvl_srr0,r9;                                 \
        mtspr   SPRN_##exc_lvl_srr1,r10;
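/*
 * For illustration: the ## token pasting makes RESTORE_xSRR(CSRR0,CSRR1)
 * expand to
 *      lwz     r9,_CSRR0(r1)
 *      lwz     r10,_CSRR1(r1)
 *      mtspr   SPRN_CSRR0,r9
 *      mtspr   SPRN_CSRR1,r10
 * so one macro covers every exception level's SRR pair.
 */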

#if defined(CONFIG_PPC_BOOK3E_MMU)
#ifdef CONFIG_PHYS_64BIT
#define RESTORE_MAS7                                                    \
        lwz     r11,MAS7(r1);                                           \
        mtspr   SPRN_MAS7,r11;
#else
#define RESTORE_MAS7
#endif /* CONFIG_PHYS_64BIT */
#define RESTORE_MMU_REGS                                                \
        lwz     r9,MAS0(r1);                                            \
        lwz     r10,MAS1(r1);                                           \
        lwz     r11,MAS2(r1);                                           \
        mtspr   SPRN_MAS0,r9;                                           \
        lwz     r9,MAS3(r1);                                            \
        mtspr   SPRN_MAS1,r10;                                          \
        lwz     r10,MAS6(r1);                                           \
        mtspr   SPRN_MAS2,r11;                                          \
        mtspr   SPRN_MAS3,r9;                                           \
        mtspr   SPRN_MAS6,r10;                                          \
        RESTORE_MAS7;
#elif defined(CONFIG_44x)
#define RESTORE_MMU_REGS                                                \
        lwz     r9,MMUCR(r1);                                           \
        mtspr   SPRN_MMUCR,r9;
#else
#define RESTORE_MMU_REGS
#endif

#ifdef CONFIG_40x
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lis     r10,saved_ksp_limit@ha;
        lwz     r10,saved_ksp_limit@l(r10);
        tovirt(r9,r9);
        stw     r10,KSP_LIMIT(r9)
        lis     r9,crit_srr0@ha;
        lwz     r9,crit_srr0@l(r9);
        lis     r10,crit_srr1@ha;
        lwz     r10,crit_srr1@l(r10);
        mtspr   SPRN_SRR0,r9;
        mtspr   SPRN_SRR1,r10;
        RET_FROM_EXC_LEVEL(SPRN_SRR2, SPRN_SRR3, PPC_RFCI)
#endif /* CONFIG_40x */

#ifdef CONFIG_BOOKE
        .globl  ret_from_crit_exc
ret_from_crit_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, PPC_RFCI)

        .globl  ret_from_debug_exc
ret_from_debug_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        lwz     r9,THREAD_INFO-THREAD(r9)
        CURRENT_THREAD_INFO(r10, r1)
        lwz     r10,TI_PREEMPT(r10)
        stw     r10,TI_PREEMPT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, PPC_RFDI)

        .globl  ret_from_mcheck_exc
ret_from_mcheck_exc:
        mfspr   r9,SPRN_SPRG_THREAD
        lwz     r10,SAVED_KSP_LIMIT(r1)
        stw     r10,KSP_LIMIT(r9)
        RESTORE_xSRR(SRR0,SRR1);
        RESTORE_xSRR(CSRR0,CSRR1);
        RESTORE_xSRR(DSRR0,DSRR1);
        RESTORE_MMU_REGS;
        RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, PPC_RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced,
 * having first saved away the global DBCR0.  Note that on
 * entry to this routine, r0 holds the dbcr0 value to set.
 */
load_dbcr0:
        mfmsr   r10             /* first disable debug exceptions */
        rlwinm  r10,r10,0,~MSR_DE
        mtmsr   r10
        isync
        mfspr   r10,SPRN_DBCR0
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
#ifdef CONFIG_SMP
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_CPU(r9)
        slwi    r9,r9,3
        add     r11,r11,r9
#endif
        stw     r10,0(r11)
        mtspr   SPRN_DBCR0,r0
        lwz     r10,4(r11)
        addi    r10,r10,1
        stw     r10,4(r11)
        li      r11,-1
        mtspr   SPRN_DBSR,r11   /* clear all pending debug events */
        blr

        .section .bss
        .align  4
global_dbcr0:
        .space  8*NR_CPUS
        .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:                        /* r10 contains MSR_KERNEL here */
        andi.   r0,r9,_TIF_NEED_RESCHED
        beq     do_user_signal

do_resched:                     /* r10 contains MSR_KERNEL here */
        /* Note: We don't need to inform lockdep that we are enabling
         * interrupts here. As far as it knows, they are already enabled
         */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        bl      schedule
recheck:
        /* Note: And we don't tell it we are disabling them again
         * either. The disable/enable cycles used to peek at
         * TI_FLAGS aren't advertised.
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)             /* disable interrupts */
        CURRENT_THREAD_INFO(r9, r1)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
        andi.   r0,r9,_TIF_USER_WORK_MASK
        beq     restore_user
do_user_signal:                 /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        /* save r13-r31 in the exception frame, if not already done */
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     2f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
2:      addi    r3,r1,STACK_FRAME_OVERHEAD
        mr      r4,r9
        bl      do_notify_resume
        REST_NVGPRS(r1)
        b       recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
        lis     r10,exc_exit_restart_end@ha
        addi    r10,r10,exc_exit_restart_end@l
        cmplw   r12,r10
        bge     3f
        lis     r11,exc_exit_restart@ha
        addi    r11,r11,exc_exit_restart@l
        cmplw   r12,r11
        blt     3f
        lis     r10,ee_restarts@ha
        lwz     r12,ee_restarts@l(r10)
        addi    r12,r12,1
        stw     r12,ee_restarts@l(r10)
        mr      r12,r11         /* restart at exc_exit_restart */
        blr
3:      /* OK, we can't recover, kill this process */
        /* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
        blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     4f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
4:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      nonrecoverable_exception
        /* shouldn't return */
        b       4b

        .section .bss
        .align  2
ee_restarts:
        .space  4
        .previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
_GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
        tophys(r7,r1)
        lwz     r8,RTASENTRY(r4)
        lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
        LOAD_MSR_KERNEL(r0,MSR_KERNEL)
        SYNC                    /* disable interrupts so SRR0/1 */
        MTMSRD(r0)              /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
        mtlr    r6
        mtspr   SPRN_SPRG_RTAS,r7
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI
1:      tophys(r9,r1)
        lwz     r8,INT_FRAME_SIZE+4(r9) /* get return address */
        lwz     r9,8(r9)        /* original msr value */
        addi    r1,r1,INT_FRAME_SIZE
        li      r0,0
        mtspr   SPRN_SPRG_RTAS,r0
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI                     /* return to caller */

        .globl  machine_check_in_rtas
machine_check_in_rtas:
        twi     31,0,0
        /* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */

#ifdef CONFIG_FUNCTION_TRACER
#ifdef CONFIG_DYNAMIC_FTRACE
_GLOBAL(mcount)
_GLOBAL(_mcount)
        /*
         * It is required that _mcount on PPC32 must preserve the
         * link register. But we have r0 to play with. We use r0
         * to push the return address back to the caller of mcount
         * into the ctr register, restore the link register and
         * then jump back using the ctr register.
         */
        mflr    r0
        mtctr   r0
        lwz     r0, 4(r1)
        mtlr    r0
        bctr
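/*
 * For illustration: with DYNAMIC_FTRACE, the _mcount above is only a
 * no-op stub that restores LR and jumps straight back to the traced
 * function; call sites are live-patched to call ftrace_caller below
 * when tracing is enabled.
 */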

_GLOBAL(ftrace_caller)
        MCOUNT_SAVE_FRAME
        /* r3 ends up with link register */
        subi    r3, r3, MCOUNT_INSN_SIZE
.globl ftrace_call
ftrace_call:
        bl      ftrace_stub
        nop
#ifdef CONFIG_FUNCTION_GRAPH_TRACER
.globl ftrace_graph_call
ftrace_graph_call:
        b       ftrace_graph_stub
_GLOBAL(ftrace_graph_stub)
#endif
        MCOUNT_RESTORE_FRAME
        /* old link register ends up in ctr reg */
        bctr
#else
_GLOBAL(mcount)
_GLOBAL(_mcount)

        MCOUNT_SAVE_FRAME

        subi    r3, r3, MCOUNT_INSN_SIZE
        LOAD_REG_ADDR(r5, ftrace_trace_function)
        lwz     r5,0(r5)

        mtctr   r5
        bctrl
        nop

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
        b       ftrace_graph_caller
#endif
        MCOUNT_RESTORE_FRAME
        bctr
#endif
EXPORT_SYMBOL(_mcount)

_GLOBAL(ftrace_stub)
        blr

#ifdef CONFIG_FUNCTION_GRAPH_TRACER
_GLOBAL(ftrace_graph_caller)
        /* load r4 with local address */
        lwz     r4, 44(r1)
        subi    r4, r4, MCOUNT_INSN_SIZE

        /* Grab the LR out of the caller stack frame */
        lwz     r3,52(r1)

        bl      prepare_ftrace_return
        nop

        /*
         * prepare_ftrace_return gives us the address we divert to.
         * Change the LR in the caller's stack frame to this.
         */
        stw     r3,52(r1)

        MCOUNT_RESTORE_FRAME
        /* old link register ends up in ctr reg */
        bctr

_GLOBAL(return_to_handler)
        /* need to save return values */
        stwu    r1, -32(r1)
        stw     r3, 20(r1)
        stw     r4, 16(r1)
        stw     r31, 12(r1)
        mr      r31, r1

        bl      ftrace_return_to_handler
        nop

        /* return value has real return address */
        mtlr    r3

        lwz     r3, 20(r1)
        lwz     r4, 16(r1)
        lwz     r31,12(r1)
        lwz     r1, 0(r1)

        /* Jump back to real return address */
        blr
#endif /* CONFIG_FUNCTION_GRAPH_TRACER */

#endif /* CONFIG_FUNCTION_TRACER */