linux/arch/powerpc/kernel/entry_32.S
/*
 *  PowerPC version
 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
 *  Rewritten by Cort Dougan (cort@fsmlabs.com) for PReP
 *    Copyright (C) 1996 Cort Dougan <cort@fsmlabs.com>
 *  Adapted for Power Macintosh by Paul Mackerras.
 *  Low-level exception handlers and MMU support
 *  rewritten by Paul Mackerras.
 *    Copyright (C) 1996 Paul Mackerras.
 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
 *
 *  This file contains the system call entry code, context switch
 *  code, and exception/interrupt return code for PowerPC.
 *
 *  This program is free software; you can redistribute it and/or
 *  modify it under the terms of the GNU General Public License
 *  as published by the Free Software Foundation; either version
 *  2 of the License, or (at your option) any later version.
 *
 */

#include <linux/errno.h>
#include <linux/sys.h>
#include <linux/threads.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/mmu.h>
#include <asm/cputable.h>
#include <asm/thread_info.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/unistd.h>

#undef SHOW_SYSCALLS
#undef SHOW_SYSCALLS_TASK

/*
 * MSR_KERNEL is > 0x10000 on 4xx/Book-E since it includes MSR_CE.
 */
#if MSR_KERNEL >= 0x10000
#define LOAD_MSR_KERNEL(r, x)   lis r,(x)@h; ori r,r,(x)@l
#else
#define LOAD_MSR_KERNEL(r, x)   li r,(x)
#endif
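/* (li only takes a 16-bit signed immediate, hence the lis/ori pair above) */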

#ifdef CONFIG_BOOKE
#include "head_booke.h"
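/*
 * The Book-E critical/debug/machine check prologs (head_booke.h) save
 * r10 and r11 in a separate exception-level frame; copy those values
 * into the regular exception frame at r11 before falling into the
 * common transfer code.  r8 is preserved in the per-level scratch SPRG
 * around the copy.
 */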
#define TRANSFER_TO_HANDLER_EXC_LEVEL(exc_level)        \
        mtspr   exc_level##_SPRG,r8;                    \
        BOOKE_LOAD_EXC_LEVEL_STACK(exc_level);          \
        lwz     r0,GPR10-INT_FRAME_SIZE(r8);            \
        stw     r0,GPR10(r11);                          \
        lwz     r0,GPR11-INT_FRAME_SIZE(r8);            \
        stw     r0,GPR11(r11);                          \
        mfspr   r8,exc_level##_SPRG

        .globl  mcheck_transfer_to_handler
mcheck_transfer_to_handler:
        TRANSFER_TO_HANDLER_EXC_LEVEL(MCHECK)
        b       transfer_to_handler_full

        .globl  debug_transfer_to_handler
debug_transfer_to_handler:
        TRANSFER_TO_HANDLER_EXC_LEVEL(DEBUG)
        b       transfer_to_handler_full

        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
        TRANSFER_TO_HANDLER_EXC_LEVEL(CRIT)
        /* fall through */
#endif

#ifdef CONFIG_40x
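/*
 * The 40x critical exception prolog saves r10 and r11 in the low-memory
 * scratch words crit_r10/crit_r11; copy them into the exception frame
 * before falling through.
 */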
        .globl  crit_transfer_to_handler
crit_transfer_to_handler:
        lwz     r0,crit_r10@l(0)
        stw     r0,GPR10(r11)
        lwz     r0,crit_r11@l(0)
        stw     r0,GPR11(r11)
        /* fall through */
#endif

/*
 * This code finishes saving the registers to the exception frame
 * and jumps to the appropriate handler for the exception, turning
 * on address translation.
 * Note that we rely on the caller having set cr0.eq iff the exception
 * occurred in kernel mode (i.e. MSR:PR = 0).
 */
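/*
 * On entry: r11 points to the exception frame, r9 holds the interrupted
 * MSR, r12 the interrupted NIP, r10 the MSR value to run the handler
 * with, and LR points to a pair of words giving the handler's virtual
 * address and the address to return to when the handler is done.
 */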
        .globl  transfer_to_handler_full
transfer_to_handler_full:
        SAVE_NVGPRS(r11)
        /* fall through */

        .globl  transfer_to_handler
transfer_to_handler:
        stw     r2,GPR2(r11)
        stw     r12,_NIP(r11)
        stw     r9,_MSR(r11)
        andi.   r2,r9,MSR_PR
        mfctr   r12
        mfspr   r2,SPRN_XER
        stw     r12,_CTR(r11)
        stw     r2,_XER(r11)
        mfspr   r12,SPRN_SPRG3
        addi    r2,r12,-THREAD
        tovirt(r2,r2)                   /* set r2 to current */
        beq     2f                      /* if from user, fix up THREAD.regs */
        addi    r11,r1,STACK_FRAME_OVERHEAD
        stw     r11,PT_REGS(r12)
#if defined(CONFIG_40x) || defined(CONFIG_BOOKE)
        /* Check to see if the dbcr0 register is set up to debug.  Use the
           single-step bit to do this. */
        lwz     r12,THREAD_DBCR0(r12)
        andis.  r12,r12,DBCR0_IC@h
        beq+    3f
        /* From user and task is ptraced - load up global dbcr0 */
        li      r12,-1                  /* clear all pending debug events */
        mtspr   SPRN_DBSR,r12
        lis     r11,global_dbcr0@ha
        tophys(r11,r11)
        addi    r11,r11,global_dbcr0@l
        lwz     r12,0(r11)
        mtspr   SPRN_DBCR0,r12
        lwz     r12,4(r11)
        addi    r12,r12,-1
        stw     r12,4(r11)
#endif
        b       3f

2:      /* if from kernel, check interrupted DOZE/NAP mode and
         * check for stack overflow
         */
        lwz     r9,THREAD_INFO-THREAD(r12)
        cmplw   r1,r9                   /* if r1 <= current->thread_info */
        ble-    stack_ovf               /* then the kernel stack overflowed */
5:
#ifdef CONFIG_6xx
        tophys(r9,r9)                   /* check local flags */
        lwz     r12,TI_LOCAL_FLAGS(r9)
        mtcrf   0x01,r12
        bt-     31-TLF_NAPPING,4f
#endif /* CONFIG_6xx */
        .globl transfer_to_handler_cont
transfer_to_handler_cont:
3:
        mflr    r9
        lwz     r11,0(r9)               /* virtual address of handler */
        lwz     r9,4(r9)                /* where to go when done */
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r10
        mtlr    r9
        SYNC
        RFI                             /* jump to handler, enable MMU */

#ifdef CONFIG_6xx
4:      rlwinm  r12,r12,0,~_TLF_NAPPING
        stw     r12,TI_LOCAL_FLAGS(r9)
        b       power_save_6xx_restore
#endif

/*
 * On kernel stack overflow, load up an initial stack pointer
 * and call StackOverflow(regs), which should not return.
 */
stack_ovf:
        /* sometimes we use a statically-allocated stack, which is OK. */
        lis     r12,_end@h
        ori     r12,r12,_end@l
        cmplw   r1,r12
        ble     5b                      /* r1 <= &_end is OK */
        SAVE_NVGPRS(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r1,init_thread_union@ha
        addi    r1,r1,init_thread_union@l
        addi    r1,r1,THREAD_SIZE-STACK_FRAME_OVERHEAD
        lis     r9,StackOverflow@ha
        addi    r9,r9,StackOverflow@l
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        FIX_SRR1(r10,r12)
        mtspr   SPRN_SRR0,r9
        mtspr   SPRN_SRR1,r10
        SYNC
        RFI

/*
 * Handle a system call.
 */
        .stabs  "arch/powerpc/kernel/",N_SO,0,0,0f
        .stabs  "entry_32.S",N_SO,0,0,0f
0:

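/*
 * On entry, r0 holds the system call number and r3-r8 the arguments;
 * the system call exception (trap 0xc00) prolog has already built the
 * exception frame.
 */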
_GLOBAL(DoSyscall)
        stw     r3,ORIG_GPR3(r1)
        li      r12,0
        stw     r12,RESULT(r1)
        lwz     r11,_CCR(r1)    /* Clear SO bit in CR */
        rlwinm  r11,r11,0,4,2
        stw     r11,_CCR(r1)
#ifdef SHOW_SYSCALLS
        bl      do_show_syscall
#endif /* SHOW_SYSCALLS */
        rlwinm  r10,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
        lwz     r11,TI_FLAGS(r10)
        andi.   r11,r11,_TIF_SYSCALL_T_OR_A
        bne-    syscall_dotrace
syscall_dotrace_cont:
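        /* Numbers >= NR_syscalls branch to 66: below and return -ENOSYS;
         * otherwise scale by 4 to index the table of handler pointers
         * and call the handler with r3-r8 still holding the arguments. */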
        cmplwi  0,r0,NR_syscalls
        lis     r10,sys_call_table@h
        ori     r10,r10,sys_call_table@l
        slwi    r0,r0,2
        bge-    66f
        lwzx    r10,r10,r0      /* Fetch system call handler [ptr] */
        mtlr    r10
        addi    r9,r1,STACK_FRAME_OVERHEAD
        PPC440EP_ERR42
        blrl                    /* Call handler */
        .globl  ret_from_syscall
ret_from_syscall:
#ifdef SHOW_SYSCALLS
        bl      do_show_syscall_exit
#endif
        mr      r6,r3
        rlwinm  r12,r1,0,0,(31-THREAD_SHIFT)    /* current_thread_info() */
        /* disable interrupts so current_thread_info()->flags can't change */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL) /* doesn't include MSR_EE */
        SYNC
        MTMSRD(r10)
        lwz     r9,TI_FLAGS(r12)
        li      r8,-_LAST_ERRNO
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
        bne-    syscall_exit_work
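        /* Return values in [-_LAST_ERRNO, -1] are errors: negate to get
         * the positive errno and set the CR0.SO bit in the saved CR so
         * userspace sees the failure. */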
        cmplw   0,r3,r8
        blt+    syscall_exit_cont
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)
syscall_exit_cont:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* If the process has its own DBCR0 value, load it up.  The single
           step bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IC@h
        bnel-   load_dbcr0
#endif
#ifdef CONFIG_44x
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        bne-    2f
1:
#endif /* CONFIG_44x */
BEGIN_FTR_SECTION
        lwarx   r7,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */
        lwz     r4,_LINK(r1)
        lwz     r5,_CCR(r1)
        mtlr    r4
        mtcr    r5
        lwz     r7,_NIP(r1)
        lwz     r8,_MSR(r1)
        FIX_SRR1(r8, r0)
        lwz     r2,GPR2(r1)
        lwz     r1,GPR1(r1)
        mtspr   SPRN_SRR0,r7
        mtspr   SPRN_SRR1,r8
        SYNC
        RFI
#ifdef CONFIG_44x
2:      li      r7,0
        iccci   r0,r0
        stw     r7,icache_44x_need_flush@l(r4)
        b       1b
#endif  /* CONFIG_44x */

66:     li      r3,-ENOSYS
        b       ret_from_syscall

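/*
 * A newly forked child starts execution here (copy_thread points the
 * child's saved NIP at ret_from_fork): finish the scheduler bookkeeping
 * and return 0 to the child through the normal syscall exit path.
 */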
        .globl  ret_from_fork
ret_from_fork:
        REST_NVGPRS(r1)
        bl      schedule_tail
        li      r3,0
        b       ret_from_syscall

/* Traced system call support */
syscall_dotrace:
        SAVE_NVGPRS(r1)
        li      r0,0xc00
        stw     r0,_TRAP(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_enter
        lwz     r0,GPR0(r1)     /* Restore original registers */
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        REST_NVGPRS(r1)
        b       syscall_dotrace_cont

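/*
 * Slow path for syscall exit.  Here r3 holds the handler's return value
 * (also copied to r6), r8 = -_LAST_ERRNO, r9 = the thread_info flags,
 * r12 = current_thread_info(), and r10 is MSR_KERNEL without MSR_EE.
 */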
syscall_exit_work:
        andi.   r0,r9,_TIF_RESTOREALL
        beq+    0f
        REST_NVGPRS(r1)
        b       2f
0:      cmplw   0,r3,r8
        blt+    1f
        andi.   r0,r9,_TIF_NOERROR
        bne-    1f
        lwz     r11,_CCR(r1)                    /* Load CR */
        neg     r3,r3
        oris    r11,r11,0x1000  /* Set SO bit in CR */
        stw     r11,_CCR(r1)

1:      stw     r6,RESULT(r1)   /* Save result */
        stw     r3,GPR3(r1)     /* Update return value */
2:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
        beq     4f

        /* Clear per-syscall TIF flags if any are set.  */

        li      r11,_TIF_PERSYSCALL_MASK
        addi    r12,r12,TI_FLAGS
3:      lwarx   r8,0,r12
        andc    r8,r8,r11
#ifdef CONFIG_IBM405_ERR77
        dcbt    0,r12
#endif
        stwcx.  r8,0,r12
        bne-    3b
        subi    r12,r12,TI_FLAGS

4:      /* Anything which requires enabling interrupts? */
        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
        beq     ret_from_except

        /* Re-enable interrupts */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)

        /* Save NVGPRS if they're not saved already */
        lwz     r4,_TRAP(r1)
        andi.   r4,r4,1
        beq     5f
        SAVE_NVGPRS(r1)
        li      r4,0xc00
        stw     r4,_TRAP(r1)
5:
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_syscall_trace_leave
        b       ret_from_except_full

#ifdef SHOW_SYSCALLS
do_show_syscall:
#ifdef SHOW_SYSCALLS_TASK
        lis     r11,show_syscalls_task@ha
        lwz     r11,show_syscalls_task@l(r11)
        cmp     0,r2,r11
        bnelr
#endif
        stw     r31,GPR31(r1)
        mflr    r31
        lis     r3,7f@ha
        addi    r3,r3,7f@l
        lwz     r4,GPR0(r1)
        lwz     r5,GPR3(r1)
        lwz     r6,GPR4(r1)
        lwz     r7,GPR5(r1)
        lwz     r8,GPR6(r1)
        lwz     r9,GPR7(r1)
        bl      printk
        lis     r3,77f@ha
        addi    r3,r3,77f@l
        lwz     r4,GPR8(r1)
        mr      r5,r2
        bl      printk
        lwz     r0,GPR0(r1)
        lwz     r3,GPR3(r1)
        lwz     r4,GPR4(r1)
        lwz     r5,GPR5(r1)
        lwz     r6,GPR6(r1)
        lwz     r7,GPR7(r1)
        lwz     r8,GPR8(r1)
        mtlr    r31
        lwz     r31,GPR31(r1)
        blr

do_show_syscall_exit:
#ifdef SHOW_SYSCALLS_TASK
        lis     r11,show_syscalls_task@ha
        lwz     r11,show_syscalls_task@l(r11)
        cmp     0,r2,r11
        bnelr
#endif
        stw     r31,GPR31(r1)
        mflr    r31
        stw     r3,RESULT(r1)   /* Save result */
        mr      r4,r3
        lis     r3,79f@ha
        addi    r3,r3,79f@l
        bl      printk
        lwz     r3,RESULT(r1)
        mtlr    r31
        lwz     r31,GPR31(r1)
        blr

7:      .string "syscall %d(%x, %x, %x, %x, %x, "
77:     .string "%x), current=%p\n"
79:     .string " -> %x\n"
        .align  2,0

#ifdef SHOW_SYSCALLS_TASK
        .data
        .globl  show_syscalls_task
show_syscalls_task:
        .long   -1
        .text
#endif
#endif /* SHOW_SYSCALLS */

/*
 * The fork/clone functions need to copy the full register set into
 * the child process. Therefore we need to save all the nonvolatile
 * registers (r13 - r31) before calling the C code.
 */
        .globl  ppc_fork
ppc_fork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_fork

        .globl  ppc_vfork
ppc_vfork:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_vfork

        .globl  ppc_clone
ppc_clone:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_clone

        .globl  ppc_swapcontext
ppc_swapcontext:
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        rlwinm  r0,r0,0,0,30            /* clear LSB to indicate full */
        stw     r0,_TRAP(r1)            /* register set saved */
        b       sys_swapcontext

/*
 * Top-level page fault handling.
 * This is in assembler because if do_page_fault tells us that
 * it is a bad kernel page fault, we want to save the non-volatile
 * registers before calling bad_page_fault.
 */
        .globl  handle_page_fault
handle_page_fault:
        stw     r4,_DAR(r1)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      do_page_fault
        cmpwi   r3,0
        beq+    ret_from_except
        SAVE_NVGPRS(r1)
        lwz     r0,_TRAP(r1)
        clrrwi  r0,r0,1
        stw     r0,_TRAP(r1)
        mr      r5,r3
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      bad_page_fault
        b       ret_from_except_full

/*
 * This routine switches between two different tasks.  The process
 * state of one is saved on its kernel stack.  Then the state
 * of the other is restored from its kernel stack.  The memory
 * management hardware is updated to the second process's state.
 * Finally, we can return to the second process.
 * On entry, r3 points to the THREAD for the current task, r4
 * points to the THREAD for the new task.
 *
 * This routine is always called with interrupts disabled.
 *
 * Note: there are two ways to get to the "going out" portion
 * of this code; either by coming in via the entry (_switch)
 * or via "fork" which must set up an environment equivalent
 * to the "_switch" path.  If you change this, you'll have to
 * change the fork code also.
 *
 * The code which creates the new task context is in 'copy_thread'
 * in arch/powerpc/kernel/process.c
 */
_GLOBAL(_switch)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        /* r3-r12 are caller saved -- Cort */
        SAVE_NVGPRS(r1)
        stw     r0,_NIP(r1)     /* Return to switch caller */
        mfmsr   r11
        li      r0,MSR_FP       /* Disable floating-point */
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_VEC@h /* Disable altivec */
        mfspr   r12,SPRN_VRSAVE /* save vrsave register value */
        stw     r12,THREAD+THREAD_VRSAVE(r2)
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        oris    r0,r0,MSR_SPE@h  /* Disable SPE */
        mfspr   r12,SPRN_SPEFSCR /* save spefscr register value */
        stw     r12,THREAD+THREAD_SPEFSCR(r2)
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */
        and.    r0,r0,r11       /* FP or altivec or SPE enabled? */
        beq+    1f
        andc    r11,r11,r0
        MTMSRD(r11)
        isync
1:      stw     r11,_MSR(r1)
        mfcr    r10
        stw     r10,_CCR(r1)
        stw     r1,KSP(r3)      /* Set old stack pointer */

#ifdef CONFIG_SMP
        /* We need a sync somewhere here to make sure that if the
         * previous task gets rescheduled on another CPU, it sees all
         * stores it has performed on this one.
         */
        sync
#endif /* CONFIG_SMP */

        tophys(r0,r4)
        CLR_TOP32(r0)
        mtspr   SPRN_SPRG3,r0   /* Update current THREAD phys addr */
        lwz     r1,KSP(r4)      /* Load new stack pointer */

        /* save the old current 'last' for return value */
        mr      r3,r2
        addi    r2,r4,-THREAD   /* Update current */

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_VRSAVE(r2)
        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif /* CONFIG_ALTIVEC */
#ifdef CONFIG_SPE
BEGIN_FTR_SECTION
        lwz     r0,THREAD+THREAD_SPEFSCR(r2)
        mtspr   SPRN_SPEFSCR,r0         /* restore SPEFSCR reg */
END_FTR_SECTION_IFSET(CPU_FTR_SPE)
#endif /* CONFIG_SPE */

        lwz     r0,_CCR(r1)
        mtcrf   0xFF,r0
        /* r3-r12 are destroyed -- Cort */
        REST_NVGPRS(r1)

        lwz     r4,_NIP(r1)     /* Return to _switch caller in new task */
        mtlr    r4
        addi    r1,r1,INT_FRAME_SIZE
        blr

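/*
 * Fast exception exit: restores only the registers saved in the frame
 * (r1, r3-r6, r9-r12, CR, LR) and returns with RFI.  Expects r11 to
 * point to the exception frame and r9/r12 to hold the MSR/NIP to
 * return to.
 */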
        .globl  fast_exception_return
fast_exception_return:
#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        andi.   r10,r9,MSR_RI           /* check for recoverable interrupt */
        beq     1f                      /* if not, we've got problems */
#endif

2:      REST_4GPRS(3, r11)
        lwz     r10,_CCR(r11)
        REST_GPR(1, r11)
        mtcr    r10
        lwz     r10,_LINK(r11)
        mtlr    r10
        REST_GPR(10, r11)
        mtspr   SPRN_SRR1,r9
        mtspr   SPRN_SRR0,r12
        REST_GPR(9, r11)
        REST_GPR(12, r11)
        lwz     r11,GPR11(r11)
        SYNC
        RFI

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
/* check if the exception happened in a restartable section */
1:      lis     r3,exc_exit_restart_end@ha
        addi    r3,r3,exc_exit_restart_end@l
        cmplw   r12,r3
        bge     3f
        lis     r4,exc_exit_restart@ha
        addi    r4,r4,exc_exit_restart@l
        cmplw   r12,r4
        blt     3f
        lis     r3,fee_restarts@ha
        tophys(r3,r3)
        lwz     r5,fee_restarts@l(r3)
        addi    r5,r5,1
        stw     r5,fee_restarts@l(r3)
        mr      r12,r4          /* restart at exc_exit_restart */
        b       2b

        .section .bss
        .align  2
fee_restarts:
        .space  4
        .previous

/* aargh, a nonrecoverable interrupt, panic */
/* aargh, we don't know which trap this is */
/* but the 601 doesn't implement the RI bit, so assume it's OK */
3:
BEGIN_FTR_SECTION
        b       2b
END_FTR_SECTION_IFSET(CPU_FTR_601)
        li      r10,-1
        stw     r10,_TRAP(r11)
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lis     r10,MSR_KERNEL@h
        ori     r10,r10,MSR_KERNEL@l
        bl      transfer_to_handler_full
        .long   nonrecoverable_exception
        .long   ret_from_except
#endif

        .globl  ret_from_except_full
ret_from_except_full:
        REST_NVGPRS(r1)
        /* fall through */

        .globl  ret_from_except
ret_from_except:
        /* Hard-disable interrupts so that current_thread_info()->flags
         * can't change between when we test it and when we return
         * from the interrupt. */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC                    /* Some chip revs have problems here... */
        MTMSRD(r10)             /* disable interrupts */

        lwz     r3,_MSR(r1)     /* Returning to user mode? */
        andi.   r0,r3,MSR_PR
        beq     resume_kernel

user_exc_return:                /* r10 contains MSR_KERNEL here */
        /* Check current_thread_info()->flags */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,(_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK|_TIF_NEED_RESCHED)
        bne     do_work

restore_user:
#if defined(CONFIG_4xx) || defined(CONFIG_BOOKE)
        /* Check whether this process has its own DBCR0 value.  The single
           step bit tells us that dbcr0 should be loaded. */
        lwz     r0,THREAD+THREAD_DBCR0(r2)
        andis.  r10,r0,DBCR0_IC@h
        bnel-   load_dbcr0
#endif

#ifdef CONFIG_PREEMPT
        b       restore

/* N.B. the only way to get here is from the beq following ret_from_except. */
resume_kernel:
        /* check current_thread_info->preempt_count */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r0,TI_PREEMPT(r9)
        cmpwi   0,r0,0          /* if non-zero, just restore regs and return */
        bne     restore
        lwz     r0,TI_FLAGS(r9)
        andi.   r0,r0,_TIF_NEED_RESCHED
        beq+    restore
        andi.   r0,r3,MSR_EE    /* interrupts off? */
        beq     restore         /* don't schedule if so */
1:      bl      preempt_schedule_irq
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r3,TI_FLAGS(r9)
        andi.   r0,r3,_TIF_NEED_RESCHED
        bne-    1b
#else
resume_kernel:
#endif /* CONFIG_PREEMPT */

        /* interrupts are hard-disabled at this point */
restore:
#ifdef CONFIG_44x
        lis     r4,icache_44x_need_flush@ha
        lwz     r5,icache_44x_need_flush@l(r4)
        cmplwi  cr0,r5,0
        beq+    1f
        li      r6,0
        iccci   r0,r0
        stw     r6,icache_44x_need_flush@l(r4)
1:
#endif  /* CONFIG_44x */
        lwz     r0,GPR0(r1)
        lwz     r2,GPR2(r1)
        REST_4GPRS(3, r1)
        REST_2GPRS(7, r1)

        lwz     r10,_XER(r1)
        lwz     r11,_CTR(r1)
        mtspr   SPRN_XER,r10
        mtctr   r11

        PPC405_ERR77(0,r1)
BEGIN_FTR_SECTION
        lwarx   r11,0,r1
END_FTR_SECTION_IFSET(CPU_FTR_NEED_PAIRED_STWCX)
        stwcx.  r0,0,r1                 /* to clear the reservation */

#if !(defined(CONFIG_4xx) || defined(CONFIG_BOOKE))
        lwz     r9,_MSR(r1)
        andi.   r10,r9,MSR_RI           /* check if this exception occurred */
        beql    nonrecoverable          /* at a bad place (MSR:RI = 0) */

        lwz     r10,_CCR(r1)
        lwz     r11,_LINK(r1)
        mtcrf   0xFF,r10
        mtlr    r11

        /*
         * Once we put values in SRR0 and SRR1, we are in a state
         * where exceptions are not recoverable, since taking an
         * exception will trash SRR0 and SRR1.  Therefore we clear the
         * MSR:RI bit to indicate this.  If we do take an exception,
         * we can't return to the point of the exception but we
         * can restart the exception exit path at the label
         * exc_exit_restart below.  -- paulus
         */
        LOAD_MSR_KERNEL(r10,MSR_KERNEL & ~MSR_RI)
        SYNC
        MTMSRD(r10)             /* clear the RI bit */
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r9,_MSR(r1)
        lwz     r12,_NIP(r1)
        FIX_SRR1(r9,r10)
        mtspr   SPRN_SRR0,r12
        mtspr   SPRN_SRR1,r9
        REST_4GPRS(9, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        SYNC
        RFI

#else /* !(CONFIG_4xx || CONFIG_BOOKE) */
        /*
         * This is a bit different on 4xx/Book-E because it doesn't have
         * the RI bit in the MSR.
         * The TLB miss handler checks if we have interrupted
         * the exception exit path and restarts it if so
         * (well maybe one day it will... :).
         */
        lwz     r11,_LINK(r1)
        mtlr    r11
        lwz     r10,_CCR(r1)
        mtcrf   0xff,r10
        REST_2GPRS(9, r1)
        .globl exc_exit_restart
exc_exit_restart:
        lwz     r11,_NIP(r1)
        lwz     r12,_MSR(r1)
exc_exit_start:
        mtspr   SPRN_SRR0,r11
        mtspr   SPRN_SRR1,r12
        REST_2GPRS(11, r1)
        lwz     r1,GPR1(r1)
        .globl exc_exit_restart_end
exc_exit_restart_end:
        PPC405_ERR77_SYNC
        rfi
        b       .                       /* prevent prefetch past rfi */

/*
 * Returning from a critical interrupt in user mode doesn't need
 * to be any different from a normal exception.  For a critical
 * interrupt in the kernel, we just return (without checking for
 * preemption) since the interrupt may have happened at some crucial
 * place (e.g. inside the TLB miss handler), and because we will be
 * running with r1 pointing into critical_stack, not the current
 * process's kernel stack (and therefore current_thread_info() will
 * give the wrong answer).
 * We have to restore various SPRs that may have been in use at the
 * time of the critical interrupt.
 *
 */
#ifdef CONFIG_40x
#define PPC_40x_TURN_OFF_MSR_DR                                             \
        /* avoid any possible TLB misses here by turning off MSR.DR, we     \
         * assume the instructions here are mapped by a pinned TLB entry */ \
        li      r10,MSR_IR;                                                 \
        mtmsr   r10;                                                        \
        isync;                                                              \
        tophys(r1, r1);
#else
#define PPC_40x_TURN_OFF_MSR_DR
#endif

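/*
 * Common return path for critical/debug/machine check exceptions:
 * restore the full register set plus DEAR/ESR and return through the
 * level-specific SRR0/SRR1 pair with the matching rfi variant.
 */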
#define RET_FROM_EXC_LEVEL(exc_lvl_srr0, exc_lvl_srr1, exc_lvl_rfi)     \
        REST_NVGPRS(r1);                                                \
        lwz     r3,_MSR(r1);                                            \
        andi.   r3,r3,MSR_PR;                                           \
        LOAD_MSR_KERNEL(r10,MSR_KERNEL);                                \
        bne     user_exc_return;                                        \
        lwz     r0,GPR0(r1);                                            \
        lwz     r2,GPR2(r1);                                            \
        REST_4GPRS(3, r1);                                              \
        REST_2GPRS(7, r1);                                              \
        lwz     r10,_XER(r1);                                           \
        lwz     r11,_CTR(r1);                                           \
        mtspr   SPRN_XER,r10;                                           \
        mtctr   r11;                                                    \
        PPC405_ERR77(0,r1);                                             \
        stwcx.  r0,0,r1;                /* to clear the reservation */  \
        lwz     r11,_LINK(r1);                                          \
        mtlr    r11;                                                    \
        lwz     r10,_CCR(r1);                                           \
        mtcrf   0xff,r10;                                               \
        PPC_40x_TURN_OFF_MSR_DR;                                        \
        lwz     r9,_DEAR(r1);                                           \
        lwz     r10,_ESR(r1);                                           \
        mtspr   SPRN_DEAR,r9;                                           \
        mtspr   SPRN_ESR,r10;                                           \
        lwz     r11,_NIP(r1);                                           \
        lwz     r12,_MSR(r1);                                           \
        mtspr   exc_lvl_srr0,r11;                                       \
        mtspr   exc_lvl_srr1,r12;                                       \
        lwz     r9,GPR9(r1);                                            \
        lwz     r12,GPR12(r1);                                          \
        lwz     r10,GPR10(r1);                                          \
        lwz     r11,GPR11(r1);                                          \
        lwz     r1,GPR1(r1);                                            \
        PPC405_ERR77_SYNC;                                              \
        exc_lvl_rfi;                                                    \
        b       .;              /* prevent prefetch past exc_lvl_rfi */

        .globl  ret_from_crit_exc
ret_from_crit_exc:
        RET_FROM_EXC_LEVEL(SPRN_CSRR0, SPRN_CSRR1, RFCI)

#ifdef CONFIG_BOOKE
        .globl  ret_from_debug_exc
ret_from_debug_exc:
        RET_FROM_EXC_LEVEL(SPRN_DSRR0, SPRN_DSRR1, RFDI)

        .globl  ret_from_mcheck_exc
ret_from_mcheck_exc:
        RET_FROM_EXC_LEVEL(SPRN_MCSRR0, SPRN_MCSRR1, RFMCI)
#endif /* CONFIG_BOOKE */

/*
 * Load the DBCR0 value for a task that is being ptraced, first saving
 * the current DBCR0 value in global_dbcr0.  Note that r0 holds the
 * dbcr0 value to set on entry to this routine.
 */
load_dbcr0:
        mfmsr   r10             /* first disable debug exceptions */
        rlwinm  r10,r10,0,~MSR_DE
        mtmsr   r10
        isync
        mfspr   r10,SPRN_DBCR0
        lis     r11,global_dbcr0@ha
        addi    r11,r11,global_dbcr0@l
        stw     r10,0(r11)
        mtspr   SPRN_DBCR0,r0
        lwz     r10,4(r11)
        addi    r10,r10,1
        stw     r10,4(r11)
        li      r11,-1
        mtspr   SPRN_DBSR,r11   /* clear all pending debug events */
        blr

        .section .bss
        .align  4
global_dbcr0:
        .space  8
        .previous
#endif /* !(CONFIG_4xx || CONFIG_BOOKE) */

do_work:                        /* r10 contains MSR_KERNEL here */
        andi.   r0,r9,_TIF_NEED_RESCHED
        beq     do_user_signal

do_resched:                     /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        bl      schedule
recheck:
        LOAD_MSR_KERNEL(r10,MSR_KERNEL)
        SYNC
        MTMSRD(r10)             /* disable interrupts */
        rlwinm  r9,r1,0,0,(31-THREAD_SHIFT)
        lwz     r9,TI_FLAGS(r9)
        andi.   r0,r9,_TIF_NEED_RESCHED
        bne-    do_resched
        andi.   r0,r9,_TIF_SIGPENDING|_TIF_RESTORE_SIGMASK
        beq     restore_user
do_user_signal:                 /* r10 contains MSR_KERNEL here */
        ori     r10,r10,MSR_EE
        SYNC
        MTMSRD(r10)             /* hard-enable interrupts */
        /* save r13-r31 in the exception frame, if not already done */
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     2f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
2:      li      r3,0
        addi    r4,r1,STACK_FRAME_OVERHEAD
        bl      do_signal
        REST_NVGPRS(r1)
        b       recheck

/*
 * We come here when we are at the end of handling an exception
 * that occurred at a place where taking an exception will lose
 * state information, such as the contents of SRR0 and SRR1.
 */
nonrecoverable:
        lis     r10,exc_exit_restart_end@ha
        addi    r10,r10,exc_exit_restart_end@l
        cmplw   r12,r10
        bge     3f
        lis     r11,exc_exit_restart@ha
        addi    r11,r11,exc_exit_restart@l
        cmplw   r12,r11
        blt     3f
        lis     r10,ee_restarts@ha
        lwz     r12,ee_restarts@l(r10)
        addi    r12,r12,1
        stw     r12,ee_restarts@l(r10)
        mr      r12,r11         /* restart at exc_exit_restart */
        blr
3:      /* OK, we can't recover, kill this process */
        /* but the 601 doesn't implement the RI bit, so assume it's OK */
BEGIN_FTR_SECTION
        blr
END_FTR_SECTION_IFSET(CPU_FTR_601)
        lwz     r3,_TRAP(r1)
        andi.   r0,r3,1
        beq     4f
        SAVE_NVGPRS(r1)
        rlwinm  r3,r3,0,0,30
        stw     r3,_TRAP(r1)
4:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      nonrecoverable_exception
        /* shouldn't return */
        b       4b

        .section .bss
        .align  2
ee_restarts:
        .space  4
        .previous

/*
 * PROM code for specific machines follows.  Put it
 * here so it's easy to add arch-specific sections later.
 * -- Cort
 */
#ifdef CONFIG_PPC_RTAS
/*
 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 * called with the MMU off.
 */
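/*
 * Note: r3 is passed straight through to RTAS; the C caller is assumed
 * to have put the physical address of the rtas argument block there.
 */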
_GLOBAL(enter_rtas)
        stwu    r1,-INT_FRAME_SIZE(r1)
        mflr    r0
        stw     r0,INT_FRAME_SIZE+4(r1)
        LOAD_REG_ADDR(r4, rtas)
        lis     r6,1f@ha        /* physical return address for rtas */
        addi    r6,r6,1f@l
        tophys(r6,r6)
        tophys(r7,r1)
        lwz     r8,RTASENTRY(r4)
        lwz     r4,RTASBASE(r4)
        mfmsr   r9
        stw     r9,8(r1)
        LOAD_MSR_KERNEL(r0,MSR_KERNEL)
        SYNC                    /* disable interrupts so SRR0/1 */
        MTMSRD(r0)              /* don't get trashed */
        li      r9,MSR_KERNEL & ~(MSR_IR|MSR_DR)
        mtlr    r6
        mtspr   SPRN_SPRG2,r7
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI
1:      tophys(r9,r1)
        lwz     r8,INT_FRAME_SIZE+4(r9) /* get return address */
        lwz     r9,8(r9)        /* original msr value */
        FIX_SRR1(r9,r0)
        addi    r1,r1,INT_FRAME_SIZE
        li      r0,0
        mtspr   SPRN_SPRG2,r0
        mtspr   SPRN_SRR0,r8
        mtspr   SPRN_SRR1,r9
        RFI                     /* return to caller */

        .globl  machine_check_in_rtas
machine_check_in_rtas:
        twi     31,0,0
        /* XXX load up BATs and panic */

#endif /* CONFIG_PPC_RTAS */
