linux/arch/powerpc/kernel/entry_64.S
   1/*
   2 *  PowerPC version 
   3 *    Copyright (C) 1995-1996 Gary Thomas (gdt@linuxppc.org)
   4 *  Rewritten by Cort Dougan (cort@cs.nmt.edu) for PReP
   5 *    Copyright (C) 1996 Cort Dougan <cort@cs.nmt.edu>
   6 *  Adapted for Power Macintosh by Paul Mackerras.
   7 *  Low-level exception handlers and MMU support
   8 *  rewritten by Paul Mackerras.
   9 *    Copyright (C) 1996 Paul Mackerras.
  10 *  MPC8xx modifications Copyright (C) 1997 Dan Malek (dmalek@jlc.net).
  11 *
  12 *  This file contains the system call entry code, context switch
  13 *  code, and exception/interrupt return code for PowerPC.
  14 *
  15 *  This program is free software; you can redistribute it and/or
  16 *  modify it under the terms of the GNU General Public License
  17 *  as published by the Free Software Foundation; either version
  18 *  2 of the License, or (at your option) any later version.
  19 */
  20
  21#include <linux/errno.h>
  22#include <asm/unistd.h>
  23#include <asm/processor.h>
  24#include <asm/page.h>
  25#include <asm/mmu.h>
  26#include <asm/thread_info.h>
  27#include <asm/ppc_asm.h>
  28#include <asm/asm-offsets.h>
  29#include <asm/cputable.h>
  30#include <asm/firmware.h>
  31#include <asm/bug.h>
  32#include <asm/ptrace.h>
  33#include <asm/irqflags.h>
  34#include <asm/ftrace.h>
  35#include <asm/hw_irq.h>
  36#include <asm/context_tracking.h>
  37
  38/*
  39 * System calls.
  40 */
  41        .section        ".toc","aw"
  42.SYS_CALL_TABLE:
  43        .tc .sys_call_table[TC],.sys_call_table
  44
  45/* This value is used to mark exception frames on the stack. */
  46exception_marker:
  47        .tc     ID_EXC_MARKER[TC],STACK_FRAME_REGS_MARKER
  48
  49        .section        ".text"
  50        .align 7
  51
  52#undef SHOW_SYSCALLS
  53
  54        .globl system_call_common
  55system_call_common:
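        /*
         * On entry (set up by the exception prolog / syscall entry code):
         *   r0      syscall number
         *   r3-r8   syscall arguments
         *   r9      caller's r13 (r13 itself now points to the PACA)
         *   r11     SRR0 (the NIP we will return to)
         *   r12     SRR1 (the caller's MSR)
         *   r1      caller's stack pointer
         */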
  56        andi.   r10,r12,MSR_PR
  57        mr      r10,r1
  58        addi    r1,r1,-INT_FRAME_SIZE
  59        beq-    1f
  60        ld      r1,PACAKSAVE(r13)
  611:      std     r10,0(r1)
  62        std     r11,_NIP(r1)
  63        std     r12,_MSR(r1)
  64        std     r0,GPR0(r1)
  65        std     r10,GPR1(r1)
  66        beq     2f                      /* if from kernel mode */
  67        ACCOUNT_CPU_USER_ENTRY(r10, r11)
  682:      std     r2,GPR2(r1)
  69        std     r3,GPR3(r1)
  70        mfcr    r2
  71        std     r4,GPR4(r1)
  72        std     r5,GPR5(r1)
  73        std     r6,GPR6(r1)
  74        std     r7,GPR7(r1)
  75        std     r8,GPR8(r1)
  76        li      r11,0
  77        std     r11,GPR9(r1)
  78        std     r11,GPR10(r1)
  79        std     r11,GPR11(r1)
  80        std     r11,GPR12(r1)
  81        std     r11,_XER(r1)
  82        std     r11,_CTR(r1)
  83        std     r9,GPR13(r1)
  84        mflr    r10
  85        /*
  86         * This clears CR0.SO (bit 28), which is the error indication on
  87         * return from this system call.
  88         */
  89        rldimi  r2,r11,28,(63-28)
  90        li      r11,0xc01
  91        std     r10,_LINK(r1)
  92        std     r11,_TRAP(r1)
  93        std     r3,ORIG_GPR3(r1)
  94        std     r2,_CCR(r1)
  95        ld      r2,PACATOC(r13)
  96        addi    r9,r1,STACK_FRAME_OVERHEAD
  97        ld      r11,exception_marker@toc(r2)
  98        std     r11,-16(r9)             /* "regshere" marker */
  99#if defined(CONFIG_VIRT_CPU_ACCOUNTING_NATIVE) && defined(CONFIG_PPC_SPLPAR)
 100BEGIN_FW_FTR_SECTION
 101        beq     33f
 102        /* if from user, see if there are any DTL entries to process */
 103        ld      r10,PACALPPACAPTR(r13)  /* get ptr to VPA */
 104        ld      r11,PACA_DTL_RIDX(r13)  /* get log read index */
 105        addi    r10,r10,LPPACA_DTLIDX
 106        LDX_BE  r10,0,r10               /* get log write index */
 107        cmpd    cr1,r11,r10
 108        beq+    cr1,33f
 109        bl      .accumulate_stolen_time
 110        REST_GPR(0,r1)
 111        REST_4GPRS(3,r1)
 112        REST_2GPRS(7,r1)
 113        addi    r9,r1,STACK_FRAME_OVERHEAD
 11433:
 115END_FW_FTR_SECTION_IFSET(FW_FEATURE_SPLPAR)
 116#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE && CONFIG_PPC_SPLPAR */
 117
 118        /*
  119         * A syscall should always be called with interrupts enabled,
  120         * so we just unconditionally hard-enable here. When some kind
  121         * of irq tracing is used, we additionally check that this
  122         * condition actually holds.
 123         */
 124#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_BUG)
 125        lbz     r10,PACASOFTIRQEN(r13)
 126        xori    r10,r10,1
 1271:      tdnei   r10,0
 128        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
 129#endif
 130
 131#ifdef CONFIG_PPC_BOOK3E
 132        wrteei  1
 133#else
 134        ld      r11,PACAKMSR(r13)
 135        ori     r11,r11,MSR_EE
 136        mtmsrd  r11,1
 137#endif /* CONFIG_PPC_BOOK3E */
 138
 139        /* We do need to set SOFTE in the stack frame or the return
 140         * from interrupt will be painful
 141         */
 142        li      r10,1
 143        std     r10,SOFTE(r1)
 144
 145#ifdef SHOW_SYSCALLS
 146        bl      .do_show_syscall
 147        REST_GPR(0,r1)
 148        REST_4GPRS(3,r1)
 149        REST_2GPRS(7,r1)
 150        addi    r9,r1,STACK_FRAME_OVERHEAD
 151#endif
 152        CURRENT_THREAD_INFO(r11, r1)
 153        ld      r10,TI_FLAGS(r11)
 154        andi.   r11,r10,_TIF_SYSCALL_T_OR_A
 155        bne     syscall_dotrace
 156.Lsyscall_dotrace_cont:
 157        cmpldi  0,r0,NR_syscalls
 158        bge-    syscall_enosys
 159
 160system_call:                    /* label this so stack traces look sane */
 161/*
  162 * Need to vector to the 32-bit or default sys_call_table here,
 163 * based on caller's run-mode / personality.
 164 */
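/*
 * The table has two 8-byte entries per syscall number: the native 64-bit
 * handler first, then the 32-bit/compat one.  Hence the syscall number is
 * shifted left by 4 (a 16-byte stride) and the compat path adds 8 to the
 * table pointer.
 */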
 165        ld      r11,.SYS_CALL_TABLE@toc(2)
 166        andi.   r10,r10,_TIF_32BIT
 167        beq     15f
 168        addi    r11,r11,8       /* use 32-bit syscall entries */
 169        clrldi  r3,r3,32
 170        clrldi  r4,r4,32
 171        clrldi  r5,r5,32
 172        clrldi  r6,r6,32
 173        clrldi  r7,r7,32
 174        clrldi  r8,r8,32
 17515:
 176        slwi    r0,r0,4
 177        ldx     r10,r11,r0      /* Fetch system call handler [ptr] */
 178        mtctr   r10
 179        bctrl                   /* Call handler */
 180
 181syscall_exit:
 182        std     r3,RESULT(r1)
 183#ifdef SHOW_SYSCALLS
 184        bl      .do_show_syscall_exit
 185        ld      r3,RESULT(r1)
 186#endif
 187        CURRENT_THREAD_INFO(r12, r1)
 188
 189        ld      r8,_MSR(r1)
 190#ifdef CONFIG_PPC_BOOK3S
 191        /* No MSR:RI on BookE */
 192        andi.   r10,r8,MSR_RI
 193        beq-    unrecov_restore
 194#endif
 195        /*
 196         * Disable interrupts so current_thread_info()->flags can't change,
 197         * and so that we don't get interrupted after loading SRR0/1.
 198         */
 199#ifdef CONFIG_PPC_BOOK3E
 200        wrteei  0
 201#else
 202        ld      r10,PACAKMSR(r13)
 203        /*
 204         * For performance reasons we clear RI the same time that we
 205         * clear EE. We only need to clear RI just before we restore r13
 206         * below, but batching it with EE saves us one expensive mtmsrd call.
 207         * We have to be careful to restore RI if we branch anywhere from
 208         * here (eg syscall_exit_work).
 209         */
 210        li      r9,MSR_RI
 211        andc    r11,r10,r9
 212        mtmsrd  r11,1
 213#endif /* CONFIG_PPC_BOOK3E */
 214
 215        ld      r9,TI_FLAGS(r12)
 216        li      r11,-_LAST_ERRNO
 217        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP|_TIF_USER_WORK_MASK|_TIF_PERSYSCALL_MASK)
 218        bne-    syscall_exit_work
 219        cmpld   r3,r11
 220        ld      r5,_CCR(r1)
 221        bge-    syscall_error
 222.Lsyscall_error_cont:
 223        ld      r7,_NIP(r1)
 224BEGIN_FTR_SECTION
 225        stdcx.  r0,0,r1                 /* to clear the reservation */
 226END_FTR_SECTION_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 227        andi.   r6,r8,MSR_PR
 228        ld      r4,_LINK(r1)
 229
 230        beq-    1f
 231        ACCOUNT_CPU_USER_EXIT(r11, r12)
 232        HMT_MEDIUM_LOW_HAS_PPR
 233        ld      r13,GPR13(r1)   /* only restore r13 if returning to usermode */
 2341:      ld      r2,GPR2(r1)
 235        ld      r1,GPR1(r1)
 236        mtlr    r4
 237        mtcr    r5
 238        mtspr   SPRN_SRR0,r7
 239        mtspr   SPRN_SRR1,r8
 240        RFI
 241        b       .       /* prevent speculative execution */
 242
 243syscall_error:  
 244        oris    r5,r5,0x1000    /* Set SO bit in CR */
 245        neg     r3,r3
 246        std     r5,_CCR(r1)
 247        b       .Lsyscall_error_cont
 248        
 249/* Traced system call support */
 250syscall_dotrace:
 251        bl      .save_nvgprs
 252        addi    r3,r1,STACK_FRAME_OVERHEAD
 253        bl      .do_syscall_trace_enter
 254        /*
 255         * Restore argument registers possibly just changed.
 256         * We use the return value of do_syscall_trace_enter
 257         * for the call number to look up in the table (r0).
 258         */
 259        mr      r0,r3
 260        ld      r3,GPR3(r1)
 261        ld      r4,GPR4(r1)
 262        ld      r5,GPR5(r1)
 263        ld      r6,GPR6(r1)
 264        ld      r7,GPR7(r1)
 265        ld      r8,GPR8(r1)
 266        addi    r9,r1,STACK_FRAME_OVERHEAD
 267        CURRENT_THREAD_INFO(r10, r1)
 268        ld      r10,TI_FLAGS(r10)
 269        b       .Lsyscall_dotrace_cont
 270
 271syscall_enosys:
 272        li      r3,-ENOSYS
 273        b       syscall_exit
 274        
 275syscall_exit_work:
 276#ifdef CONFIG_PPC_BOOK3S
 277        mtmsrd  r10,1           /* Restore RI */
 278#endif
 279        /* If TIF_RESTOREALL is set, don't scribble on either r3 or ccr.
 280         If TIF_NOERROR is set, just save r3 as it is. */
 281
 282        andi.   r0,r9,_TIF_RESTOREALL
 283        beq+    0f
 284        REST_NVGPRS(r1)
 285        b       2f
  2860:      cmpld   r3,r11          /* r11 is -_LAST_ERRNO */
 287        blt+    1f
 288        andi.   r0,r9,_TIF_NOERROR
 289        bne-    1f
 290        ld      r5,_CCR(r1)
 291        neg     r3,r3
 292        oris    r5,r5,0x1000    /* Set SO bit in CR */
 293        std     r5,_CCR(r1)
 2941:      std     r3,GPR3(r1)
 2952:      andi.   r0,r9,(_TIF_PERSYSCALL_MASK)
 296        beq     4f
 297
 298        /* Clear per-syscall TIF flags if any are set.  */
 299
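        /* thread_info->flags may be updated concurrently by other contexts,
         * hence the atomic ldarx/stdcx. loop below rather than a plain
         * read-modify-write.
         */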
 300        li      r11,_TIF_PERSYSCALL_MASK
 301        addi    r12,r12,TI_FLAGS
 3023:      ldarx   r10,0,r12
 303        andc    r10,r10,r11
 304        stdcx.  r10,0,r12
 305        bne-    3b
 306        subi    r12,r12,TI_FLAGS
 307
 3084:      /* Anything else left to do? */
 309        SET_DEFAULT_THREAD_PPR(r3, r10)         /* Set thread.ppr = 3 */
 310        andi.   r0,r9,(_TIF_SYSCALL_T_OR_A|_TIF_SINGLESTEP)
 311        beq     .ret_from_except_lite
 312
 313        /* Re-enable interrupts */
 314#ifdef CONFIG_PPC_BOOK3E
 315        wrteei  1
 316#else
 317        ld      r10,PACAKMSR(r13)
 318        ori     r10,r10,MSR_EE
 319        mtmsrd  r10,1
 320#endif /* CONFIG_PPC_BOOK3E */
 321
 322        bl      .save_nvgprs
 323        addi    r3,r1,STACK_FRAME_OVERHEAD
 324        bl      .do_syscall_trace_leave
 325        b       .ret_from_except
 326
 327/* Save non-volatile GPRs, if not already saved. */
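/* The low bit of _TRAP is used as a flag: while it is set, the non-volatile
 * GPRs have not been saved in this frame.  Clear it once they are saved so
 * the exception return path knows it may restore them.
 */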
 328_GLOBAL(save_nvgprs)
 329        ld      r11,_TRAP(r1)
 330        andi.   r0,r11,1
 331        beqlr-
 332        SAVE_NVGPRS(r1)
 333        clrrdi  r0,r11,1
 334        std     r0,_TRAP(r1)
 335        blr
 336
 337        
 338/*
 339 * The sigsuspend and rt_sigsuspend system calls can call do_signal
 340 * and thus put the process into the stopped state where we might
 341 * want to examine its user state with ptrace.  Therefore we need
 342 * to save all the nonvolatile registers (r14 - r31) before calling
 343 * the C code.  Similarly, fork, vfork and clone need the full
 344 * register state on the stack so that it can be copied to the child.
 345 */
 346
 347_GLOBAL(ppc_fork)
 348        bl      .save_nvgprs
 349        bl      .sys_fork
 350        b       syscall_exit
 351
 352_GLOBAL(ppc_vfork)
 353        bl      .save_nvgprs
 354        bl      .sys_vfork
 355        b       syscall_exit
 356
 357_GLOBAL(ppc_clone)
 358        bl      .save_nvgprs
 359        bl      .sys_clone
 360        b       syscall_exit
 361
 362_GLOBAL(ppc32_swapcontext)
 363        bl      .save_nvgprs
 364        bl      .compat_sys_swapcontext
 365        b       syscall_exit
 366
 367_GLOBAL(ppc64_swapcontext)
 368        bl      .save_nvgprs
 369        bl      .sys_swapcontext
 370        b       syscall_exit
 371
 372_GLOBAL(ret_from_fork)
 373        bl      .schedule_tail
 374        REST_NVGPRS(r1)
 375        li      r3,0
 376        b       syscall_exit
 377
 378_GLOBAL(ret_from_kernel_thread)
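        /* For a kernel thread, copy_thread() places the function descriptor
         * of the thread function in the frame's r14 slot and its argument in
         * r15; REST_NVGPRS below pulls them back, and the descriptor is then
         * dereferenced (ELFv1 ABI) to get the actual entry point.
         */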
 379        bl      .schedule_tail
 380        REST_NVGPRS(r1)
 381        ld      r14, 0(r14)
 382        mtlr    r14
 383        mr      r3,r15
 384        blrl
 385        li      r3,0
 386        b       syscall_exit
 387
 388        .section        ".toc","aw"
 389DSCR_DEFAULT:
 390        .tc dscr_default[TC],dscr_default
 391
 392        .section        ".text"
 393
 394/*
 395 * This routine switches between two different tasks.  The process
 396 * state of one is saved on its kernel stack.  Then the state
 397 * of the other is restored from its kernel stack.  The memory
 398 * management hardware is updated to the second process's state.
 399 * Finally, we can return to the second process, via ret_from_except.
 400 * On entry, r3 points to the THREAD for the current task, r4
 401 * points to the THREAD for the new task.
 402 *
 403 * Note: there are two ways to get to the "going out" portion
 404 * of this code; either by coming in via the entry (_switch)
 405 * or via "fork" which must set up an environment equivalent
 406 * to the "_switch" path.  If you change this you'll have to change
 407 * the fork code also.
 408 *
 409 * The code which creates the new task context is in 'copy_thread'
 410 * in arch/powerpc/kernel/process.c 
 411 */
 412        .align  7
 413_GLOBAL(_switch)
 414        mflr    r0
 415        std     r0,16(r1)
 416        stdu    r1,-SWITCH_FRAME_SIZE(r1)
 417        /* r3-r13 are caller saved -- Cort */
 418        SAVE_8GPRS(14, r1)
 419        SAVE_10GPRS(22, r1)
 420        mflr    r20             /* Return to switch caller */
 421        mfmsr   r22
 422        li      r0, MSR_FP
 423#ifdef CONFIG_VSX
 424BEGIN_FTR_SECTION
 425        oris    r0,r0,MSR_VSX@h /* Disable VSX */
 426END_FTR_SECTION_IFSET(CPU_FTR_VSX)
 427#endif /* CONFIG_VSX */
 428#ifdef CONFIG_ALTIVEC
 429BEGIN_FTR_SECTION
 430        oris    r0,r0,MSR_VEC@h /* Disable altivec */
 431        mfspr   r24,SPRN_VRSAVE /* save vrsave register value */
 432        std     r24,THREAD_VRSAVE(r3)
 433END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 434#endif /* CONFIG_ALTIVEC */
 435#ifdef CONFIG_PPC64
 436BEGIN_FTR_SECTION
 437        mfspr   r25,SPRN_DSCR
 438        std     r25,THREAD_DSCR(r3)
 439END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 440#endif
 441        and.    r0,r0,r22
 442        beq+    1f
 443        andc    r22,r22,r0
 444        MTMSRD(r22)
 445        isync
 4461:      std     r20,_NIP(r1)
 447        mfcr    r23
 448        std     r23,_CCR(r1)
 449        std     r1,KSP(r3)      /* Set old stack pointer */
 450
 451#ifdef CONFIG_PPC_BOOK3S_64
 452BEGIN_FTR_SECTION
 453        /* Event based branch registers */
 454        mfspr   r0, SPRN_BESCR
 455        std     r0, THREAD_BESCR(r3)
 456        mfspr   r0, SPRN_EBBHR
 457        std     r0, THREAD_EBBHR(r3)
 458        mfspr   r0, SPRN_EBBRR
 459        std     r0, THREAD_EBBRR(r3)
 460END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 461#endif
 462
 463#ifdef CONFIG_SMP
 464        /* We need a sync somewhere here to make sure that if the
 465         * previous task gets rescheduled on another CPU, it sees all
 466         * stores it has performed on this one.
 467         */
 468        sync
 469#endif /* CONFIG_SMP */
 470
 471        /*
 472         * If we optimise away the clear of the reservation in system
 473         * calls because we know the CPU tracks the address of the
 474         * reservation, then we need to clear it here to cover the
 475         * case that the kernel context switch path has no larx
 476         * instructions.
 477         */
 478BEGIN_FTR_SECTION
 479        ldarx   r6,0,r1
 480END_FTR_SECTION_IFSET(CPU_FTR_STCX_CHECKS_ADDRESS)
 481
 482#ifdef CONFIG_PPC_BOOK3S
  483/* Cancel all explicit user streams: they are of no use after the context
  484 * switch and would stop the HW from creating streams itself.
 485 */
 486        DCBT_STOP_ALL_STREAM_IDS(r6)
 487#endif
 488
 489        addi    r6,r4,-THREAD   /* Convert THREAD to 'current' */
 490        std     r6,PACACURRENT(r13)     /* Set new 'current' */
 491
 492        ld      r8,KSP(r4)      /* new stack pointer */
 493#ifdef CONFIG_PPC_BOOK3S
 494BEGIN_FTR_SECTION
 495  BEGIN_FTR_SECTION_NESTED(95)
 496        clrrdi  r6,r8,28        /* get its ESID */
 497        clrrdi  r9,r1,28        /* get current sp ESID */
 498  FTR_SECTION_ELSE_NESTED(95)
 499        clrrdi  r6,r8,40        /* get its 1T ESID */
 500        clrrdi  r9,r1,40        /* get current sp 1T ESID */
 501  ALT_MMU_FTR_SECTION_END_NESTED_IFCLR(MMU_FTR_1T_SEGMENT, 95)
 502FTR_SECTION_ELSE
 503        b       2f
 504ALT_MMU_FTR_SECTION_END_IFSET(MMU_FTR_SLB)
 505        clrldi. r0,r6,2         /* is new ESID c00000000? */
 506        cmpd    cr1,r6,r9       /* or is new ESID the same as current ESID? */
 507        cror    eq,4*cr1+eq,eq
 508        beq     2f              /* if yes, don't slbie it */
 509
 510        /* Bolt in the new stack SLB entry */
 511        ld      r7,KSP_VSID(r4) /* Get new stack's VSID */
 512        oris    r0,r6,(SLB_ESID_V)@h
 513        ori     r0,r0,(SLB_NUM_BOLTED-1)@l
 514BEGIN_FTR_SECTION
 515        li      r9,MMU_SEGSIZE_1T       /* insert B field */
 516        oris    r6,r6,(MMU_SEGSIZE_1T << SLBIE_SSIZE_SHIFT)@h
 517        rldimi  r7,r9,SLB_VSID_SSIZE_SHIFT,0
 518END_MMU_FTR_SECTION_IFSET(MMU_FTR_1T_SEGMENT)
 519
 520        /* Update the last bolted SLB.  No write barriers are needed
 521         * here, provided we only update the current CPU's SLB shadow
 522         * buffer.
 523         */
 524        ld      r9,PACA_SLBSHADOWPTR(r13)
 525        li      r12,0
 526        std     r12,SLBSHADOW_STACKESID(r9)     /* Clear ESID */
 527        li      r12,SLBSHADOW_STACKVSID
 528        STDX_BE r7,r12,r9                       /* Save VSID */
 529        li      r12,SLBSHADOW_STACKESID
 530        STDX_BE r0,r12,r9                       /* Save ESID */
 531
 532        /* No need to check for MMU_FTR_NO_SLBIE_B here, since when
 533         * we have 1TB segments, the only CPUs known to have the errata
 534         * only support less than 1TB of system memory and we'll never
 535         * actually hit this code path.
 536         */
 537
 538        slbie   r6
 539        slbie   r6              /* Workaround POWER5 < DD2.1 issue */
 540        slbmte  r7,r0
 541        isync
 5422:
  543#endif /* CONFIG_PPC_BOOK3S */
 544
 545        CURRENT_THREAD_INFO(r7, r8)  /* base of new stack */
 546        /* Note: this uses SWITCH_FRAME_SIZE rather than INT_FRAME_SIZE
 547           because we don't need to leave the 288-byte ABI gap at the
 548           top of the kernel stack. */
 549        addi    r7,r7,THREAD_SIZE-SWITCH_FRAME_SIZE
 550
 551        mr      r1,r8           /* start using new stack pointer */
 552        std     r7,PACAKSAVE(r13)
 553
 554#ifdef CONFIG_PPC_BOOK3S_64
 555BEGIN_FTR_SECTION
 556        /* Event based branch registers */
 557        ld      r0, THREAD_BESCR(r4)
 558        mtspr   SPRN_BESCR, r0
 559        ld      r0, THREAD_EBBHR(r4)
 560        mtspr   SPRN_EBBHR, r0
 561        ld      r0, THREAD_EBBRR(r4)
 562        mtspr   SPRN_EBBRR, r0
 563
 564        ld      r0,THREAD_TAR(r4)
 565        mtspr   SPRN_TAR,r0
 566END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
 567#endif
 568
 569#ifdef CONFIG_ALTIVEC
 570BEGIN_FTR_SECTION
 571        ld      r0,THREAD_VRSAVE(r4)
 572        mtspr   SPRN_VRSAVE,r0          /* if G4, restore VRSAVE reg */
 573END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
 574#endif /* CONFIG_ALTIVEC */
 575#ifdef CONFIG_PPC64
 576BEGIN_FTR_SECTION
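        /*
         * DSCR handling: if the new thread has not set its own DSCR
         * (dscr_inherit == 0), use the system-wide default from
         * dscr_default.  On CPUs with an FSCR, the DSCR facility bit is
         * updated to match dscr_inherit, and SPRN_DSCR is only written
         * if the value differs from the old thread's (saved in r25 above).
         */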
 577        lwz     r6,THREAD_DSCR_INHERIT(r4)
 578        ld      r7,DSCR_DEFAULT@toc(2)
 579        ld      r0,THREAD_DSCR(r4)
 580        cmpwi   r6,0
 581        bne     1f
 582        ld      r0,0(r7)
 5831:
 584BEGIN_FTR_SECTION_NESTED(70)
 585        mfspr   r8, SPRN_FSCR
 586        rldimi  r8, r6, FSCR_DSCR_LG, (63 - FSCR_DSCR_LG)
 587        mtspr   SPRN_FSCR, r8
 588END_FTR_SECTION_NESTED(CPU_FTR_ARCH_207S, CPU_FTR_ARCH_207S, 70)
 589        cmpd    r0,r25
 590        beq     2f
 591        mtspr   SPRN_DSCR,r0
 5922:
 593END_FTR_SECTION_IFSET(CPU_FTR_DSCR)
 594#endif
 595
 596        ld      r6,_CCR(r1)
 597        mtcrf   0xFF,r6
 598
 599        /* r3-r13 are destroyed -- Cort */
 600        REST_8GPRS(14, r1)
 601        REST_10GPRS(22, r1)
 602
 603        /* convert old thread to its task_struct for return value */
 604        addi    r3,r3,-THREAD
 605        ld      r7,_NIP(r1)     /* Return to _switch caller in new task */
 606        mtlr    r7
 607        addi    r1,r1,SWITCH_FRAME_SIZE
 608        blr
 609
 610        .align  7
 611_GLOBAL(ret_from_except)
 612        ld      r11,_TRAP(r1)
 613        andi.   r0,r11,1
 614        bne     .ret_from_except_lite
 615        REST_NVGPRS(r1)
 616
 617_GLOBAL(ret_from_except_lite)
 618        /*
 619         * Disable interrupts so that current_thread_info()->flags
 620         * can't change between when we test it and when we return
 621         * from the interrupt.
 622         */
 623#ifdef CONFIG_PPC_BOOK3E
 624        wrteei  0
 625#else
 626        ld      r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 627        mtmsrd  r10,1             /* Update machine state */
 628#endif /* CONFIG_PPC_BOOK3E */
 629
 630        CURRENT_THREAD_INFO(r9, r1)
 631        ld      r3,_MSR(r1)
 632#ifdef CONFIG_PPC_BOOK3E
 633        ld      r10,PACACURRENT(r13)
 634#endif /* CONFIG_PPC_BOOK3E */
 635        ld      r4,TI_FLAGS(r9)
 636        andi.   r3,r3,MSR_PR
 637        beq     resume_kernel
 638#ifdef CONFIG_PPC_BOOK3E
 639        lwz     r3,(THREAD+THREAD_DBCR0)(r10)
 640#endif /* CONFIG_PPC_BOOK3E */
 641
 642        /* Check current_thread_info()->flags */
 643        andi.   r0,r4,_TIF_USER_WORK_MASK
 644#ifdef CONFIG_PPC_BOOK3E
 645        bne     1f
 646        /*
 647         * Check to see if the dbcr0 register is set up to debug.
 648         * Use the internal debug mode bit to do this.
 649         */
 650        andis.  r0,r3,DBCR0_IDM@h
 651        beq     restore
 652        mfmsr   r0
 653        rlwinm  r0,r0,0,~MSR_DE /* Clear MSR.DE */
 654        mtmsr   r0
 655        mtspr   SPRN_DBCR0,r3
 656        li      r10, -1
 657        mtspr   SPRN_DBSR,r10
 658        b       restore
 659#else
 660        beq     restore
 661#endif
 6621:      andi.   r0,r4,_TIF_NEED_RESCHED
 663        beq     2f
 664        bl      .restore_interrupts
 665        SCHEDULE_USER
 666        b       .ret_from_except_lite
 667
 6682:      bl      .save_nvgprs
 669        bl      .restore_interrupts
 670        addi    r3,r1,STACK_FRAME_OVERHEAD
 671        bl      .do_notify_resume
 672        b       .ret_from_except
 673
 674resume_kernel:
 675        /* check current_thread_info, _TIF_EMULATE_STACK_STORE */
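        /*
         * An stwu/stdu updating r1 was emulated (e.g. while single-stepping
         * a kprobed instruction), but the store itself could not be done at
         * that point because the exception frame sits where the store would
         * land.  Complete it now: build a copy of the exception frame just
         * below the emulated new stack pointer, switch to it, and then
         * perform the deferred store of the old r1 (the back chain).
         */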
 676        CURRENT_THREAD_INFO(r9, r1)
 677        ld      r8,TI_FLAGS(r9)
 678        andis.  r8,r8,_TIF_EMULATE_STACK_STORE@h
 679        beq+    1f
 680
  681        addi    r8,r1,INT_FRAME_SIZE    /* r8 = r1 before the exception, i.e. the back chain value to store */
 682
  683        ld      r3,GPR1(r1)             /* emulated new stack pointer */
 684        subi    r3,r3,INT_FRAME_SIZE    /* dst: Allocate a trampoline exception frame */
 685        mr      r4,r1                   /* src:  current exception frame */
 686        mr      r1,r3                   /* Reroute the trampoline frame to r1 */
 687
 688        /* Copy from the original to the trampoline. */
 689        li      r5,INT_FRAME_SIZE/8     /* size: INT_FRAME_SIZE */
 690        li      r6,0                    /* start offset: 0 */
 691        mtctr   r5
 6922:      ldx     r0,r6,r4
 693        stdx    r0,r6,r3
 694        addi    r6,r6,8
 695        bdnz    2b
 696
  697        /* Do the real store operation to complete the stwu/stdu */
  698        ld      r5,GPR1(r1)
 699        std     r8,0(r5)
 700
 701        /* Clear _TIF_EMULATE_STACK_STORE flag */
 702        lis     r11,_TIF_EMULATE_STACK_STORE@h
 703        addi    r5,r9,TI_FLAGS
 7040:      ldarx   r4,0,r5
 705        andc    r4,r4,r11
 706        stdcx.  r4,0,r5
 707        bne-    0b
 7081:
 709
 710#ifdef CONFIG_PREEMPT
 711        /* Check if we need to preempt */
 712        andi.   r0,r4,_TIF_NEED_RESCHED
 713        beq+    restore
 714        /* Check that preempt_count() == 0 and interrupts are enabled */
 715        lwz     r8,TI_PREEMPT(r9)
 716        cmpwi   cr1,r8,0
 717        ld      r0,SOFTE(r1)
 718        cmpdi   r0,0
 719        crandc  eq,cr1*4+eq,eq
 720        bne     restore
 721
 722        /*
 723         * Here we are preempting the current task. We want to make
 724         * sure we are soft-disabled first and reconcile irq state.
 725         */
 726        RECONCILE_IRQ_STATE(r3,r4)
 7271:      bl      .preempt_schedule_irq
 728
 729        /* Re-test flags and eventually loop */
 730        CURRENT_THREAD_INFO(r9, r1)
 731        ld      r4,TI_FLAGS(r9)
 732        andi.   r0,r4,_TIF_NEED_RESCHED
 733        bne     1b
 734
 735        /*
  736         * arch_local_irq_restore() from preempt_schedule_irq above may
  737         * have enabled hard interrupts, but we really should disable
  738         * them again before returning from the interrupt so that we
  739         * don't get interrupted after loading SRR0/1.
 740         */
 741#ifdef CONFIG_PPC_BOOK3E
 742        wrteei  0
 743#else
 744        ld      r10,PACAKMSR(r13) /* Get kernel MSR without EE */
 745        mtmsrd  r10,1             /* Update machine state */
 746#endif /* CONFIG_PPC_BOOK3E */
 747#endif /* CONFIG_PREEMPT */
 748
 749        .globl  fast_exc_return_irq
 750fast_exc_return_irq:
 751restore:
 752        /*
 753         * This is the main kernel exit path. First we check if we
 754         * are about to re-enable interrupts
 755         */
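        /* r5 = the soft-enable state we are returning to (from the frame),
         * r6 = the current paca->soft_enabled state.
         */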
 756        ld      r5,SOFTE(r1)
 757        lbz     r6,PACASOFTIRQEN(r13)
 758        cmpwi   cr0,r5,0
 759        beq     restore_irq_off
 760
 761        /* We are enabling, were we already enabled ? Yes, just return */
 762        cmpwi   cr0,r6,1
 763        beq     cr0,do_restore
 764
 765        /*
 766         * We are about to soft-enable interrupts (we are hard disabled
 767         * at this point). We check if there's anything that needs to
 768         * be replayed first.
 769         */
 770        lbz     r0,PACAIRQHAPPENED(r13)
 771        cmpwi   cr0,r0,0
 772        bne-    restore_check_irq_replay
 773
 774        /*
  775         * Get here when nothing happened while soft-disabled; just
  776         * soft-enable and move on. We will hard-enable as a side
  777         * effect of rfi.
 778         */
 779restore_no_replay:
 780        TRACE_ENABLE_INTS
 781        li      r0,1
 782        stb     r0,PACASOFTIRQEN(r13);
 783
 784        /*
 785         * Final return path. BookE is handled in a different file
 786         */
 787do_restore:
 788#ifdef CONFIG_PPC_BOOK3E
 789        b       .exception_return_book3e
 790#else
 791        /*
 792         * Clear the reservation. If we know the CPU tracks the address of
 793         * the reservation then we can potentially save some cycles and use
 794         * a larx. On POWER6 and POWER7 this is significantly faster.
 795         */
 796BEGIN_FTR_SECTION
 797        stdcx.  r0,0,r1         /* to clear the reservation */
 798FTR_SECTION_ELSE
 799        ldarx   r4,0,r1
 800ALT_FTR_SECTION_END_IFCLR(CPU_FTR_STCX_CHECKS_ADDRESS)
 801
 802        /*
  803         * Some code paths, such as load_up_fpu or altivec, return directly
 804         * here. They run entirely hard disabled and do not alter the
 805         * interrupt state. They also don't use lwarx/stwcx. and thus
 806         * are known not to leave dangling reservations.
 807         */
 808        .globl  fast_exception_return
 809fast_exception_return:
 810        ld      r3,_MSR(r1)
 811        ld      r4,_CTR(r1)
 812        ld      r0,_LINK(r1)
 813        mtctr   r4
 814        mtlr    r0
 815        ld      r4,_XER(r1)
 816        mtspr   SPRN_XER,r4
 817
 818        REST_8GPRS(5, r1)
 819
 820        andi.   r0,r3,MSR_RI
 821        beq-    unrecov_restore
 822
 823        /*
 824         * Clear RI before restoring r13.  If we are returning to
 825         * userspace and we take an exception after restoring r13,
 826         * we end up corrupting the userspace r13 value.
 827         */
 828        ld      r4,PACAKMSR(r13) /* Get kernel MSR without EE */
 829        andc    r4,r4,r0         /* r0 contains MSR_RI here */
 830        mtmsrd  r4,1
 831
 832#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
 833        /* TM debug */
 834        std     r3, PACATMSCRATCH(r13) /* Stash returned-to MSR */
 835#endif
 836        /*
  837         * r13 is our per-cpu area; only restore it if we are returning to
  838         * userspace, since the value stored in the stack frame may belong
  839         * to another CPU.
 840         */
 841        andi.   r0,r3,MSR_PR
 842        beq     1f
 843        ACCOUNT_CPU_USER_EXIT(r2, r4)
 844        RESTORE_PPR(r2, r4)
 845        REST_GPR(13, r1)
 8461:
 847        mtspr   SPRN_SRR1,r3
 848
 849        ld      r2,_CCR(r1)
 850        mtcrf   0xFF,r2
 851        ld      r2,_NIP(r1)
 852        mtspr   SPRN_SRR0,r2
 853
 854        ld      r0,GPR0(r1)
 855        ld      r2,GPR2(r1)
 856        ld      r3,GPR3(r1)
 857        ld      r4,GPR4(r1)
 858        ld      r1,GPR1(r1)
 859
 860        rfid
 861        b       .       /* prevent speculative execution */
 862
 863#endif /* CONFIG_PPC_BOOK3E */
 864
 865        /*
 866         * We are returning to a context with interrupts soft disabled.
 867         *
  868         * However, we may also be about to hard-enable, so we need to
  869         * make sure that in this case we also clear PACA_IRQ_HARD_DIS,
  870         * or that bit can get out of sync and bad things will happen.
 871         */
 872restore_irq_off:
 873        ld      r3,_MSR(r1)
 874        lbz     r7,PACAIRQHAPPENED(r13)
 875        andi.   r0,r3,MSR_EE
 876        beq     1f
 877        rlwinm  r7,r7,0,~PACA_IRQ_HARD_DIS
 878        stb     r7,PACAIRQHAPPENED(r13)
 8791:      li      r0,0
 880        stb     r0,PACASOFTIRQEN(r13);
 881        TRACE_DISABLE_INTS
 882        b       do_restore
 883
 884        /*
 885         * Something did happen, check if a re-emit is needed
 886         * (this also clears paca->irq_happened)
 887         */
 888restore_check_irq_replay:
 889        /* XXX: We could implement a fast path here where we check
 890         * for irq_happened being just 0x01, in which case we can
 891         * clear it and return. That means that we would potentially
 892         * miss a decrementer having wrapped all the way around.
 893         *
 894         * Still, this might be useful for things like hash_page
 895         */
 896        bl      .__check_irq_replay
 897        cmpwi   cr0,r3,0
 898        beq     restore_no_replay
 899 
 900        /*
 901         * We need to re-emit an interrupt. We do so by re-using our
 902         * existing exception frame. We first change the trap value,
 903         * but we need to ensure we preserve the low nibble of it
 904         */
 905        ld      r4,_TRAP(r1)
 906        clrldi  r4,r4,60
 907        or      r4,r4,r3
 908        std     r4,_TRAP(r1)
 909
 910        /*
 911         * Then find the right handler and call it. Interrupts are
 912         * still soft-disabled and we keep them that way.
 913        */
 914        cmpwi   cr0,r3,0x500
 915        bne     1f
 916        addi    r3,r1,STACK_FRAME_OVERHEAD;
 917        bl      .do_IRQ
 918        b       .ret_from_except
 9191:      cmpwi   cr0,r3,0x900
 920        bne     1f
 921        addi    r3,r1,STACK_FRAME_OVERHEAD;
 922        bl      .timer_interrupt
 923        b       .ret_from_except
 924#ifdef CONFIG_PPC_DOORBELL
 9251:
 926#ifdef CONFIG_PPC_BOOK3E
 927        cmpwi   cr0,r3,0x280
 928#else
 929        BEGIN_FTR_SECTION
 930                cmpwi   cr0,r3,0xe80
 931        FTR_SECTION_ELSE
 932                cmpwi   cr0,r3,0xa00
 933        ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 934#endif /* CONFIG_PPC_BOOK3E */
 935        bne     1f
 936        addi    r3,r1,STACK_FRAME_OVERHEAD;
 937        bl      .doorbell_exception
 938        b       .ret_from_except
 939#endif /* CONFIG_PPC_DOORBELL */
 9401:      b       .ret_from_except /* What else to do here ? */
 941 
 942unrecov_restore:
 943        addi    r3,r1,STACK_FRAME_OVERHEAD
 944        bl      .unrecoverable_exception
 945        b       unrecov_restore
 946
 947#ifdef CONFIG_PPC_RTAS
 948/*
 949 * On CHRP, the Run-Time Abstraction Services (RTAS) have to be
 950 * called with the MMU off.
 951 *
 952 * In addition, we need to be in 32b mode, at least for now.
 953 * 
 954 * Note: r3 is an input parameter to rtas, so don't trash it...
 955 */
 956_GLOBAL(enter_rtas)
 957        mflr    r0
 958        std     r0,16(r1)
 959        stdu    r1,-RTAS_FRAME_SIZE(r1) /* Save SP and create stack space. */
 960
 961        /* Because RTAS is running in 32b mode, it clobbers the high order half
 962         * of all registers that it saves.  We therefore save those registers
 963         * RTAS might touch to the stack.  (r0, r3-r13 are caller saved)
 964         */
 965        SAVE_GPR(2, r1)                 /* Save the TOC */
 966        SAVE_GPR(13, r1)                /* Save paca */
 967        SAVE_8GPRS(14, r1)              /* Save the non-volatiles */
 968        SAVE_10GPRS(22, r1)             /* ditto */
 969
 970        mfcr    r4
 971        std     r4,_CCR(r1)
 972        mfctr   r5
 973        std     r5,_CTR(r1)
 974        mfspr   r6,SPRN_XER
 975        std     r6,_XER(r1)
 976        mfdar   r7
 977        std     r7,_DAR(r1)
 978        mfdsisr r8
 979        std     r8,_DSISR(r1)
 980
 981        /* Temporary workaround to clear CR until RTAS can be modified to
 982         * ignore all bits.
 983         */
 984        li      r0,0
 985        mtcr    r0
 986
 987#ifdef CONFIG_BUG       
 988        /* There is no way it is acceptable to get here with interrupts enabled,
 989         * check it with the asm equivalent of WARN_ON
 990         */
 991        lbz     r0,PACASOFTIRQEN(r13)
 9921:      tdnei   r0,0
 993        EMIT_BUG_ENTRY 1b,__FILE__,__LINE__,BUGFLAG_WARNING
 994#endif
 995        
 996        /* Hard-disable interrupts */
 997        mfmsr   r6
 998        rldicl  r7,r6,48,1
 999        rotldi  r7,r7,16
1000        mtmsrd  r7,1
1001
1002        /* Unfortunately, the stack pointer and the MSR are also clobbered,
1003         * so they are saved in the PACA which allows us to restore
1004         * our original state after RTAS returns.
1005         */
1006        std     r1,PACAR1(r13)
1007        std     r6,PACASAVEDMSR(r13)
1008
1009        /* Setup our real return addr */        
1010        LOAD_REG_ADDR(r4,.rtas_return_loc)
1011        clrldi  r4,r4,2                 /* convert to realmode address */
1012        mtlr    r4
1013
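        /* Build the MSR values we need: r0 is the current MSR with
         * EE/SE/BE/RI cleared (used just below to hard-disable before
         * SRR0/1 are set up), and r6 additionally clears SF/IR/DR/FE0/
         * FE1/FP, giving the 32-bit real-mode MSR that RTAS runs with.
         */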
1014        li      r0,0
1015        ori     r0,r0,MSR_EE|MSR_SE|MSR_BE|MSR_RI
1016        andc    r0,r6,r0
1017        
1018        li      r9,1
1019        rldicr  r9,r9,MSR_SF_LG,(63-MSR_SF_LG)
1020        ori     r9,r9,MSR_IR|MSR_DR|MSR_FE0|MSR_FE1|MSR_FP|MSR_RI
1021        andc    r6,r0,r9
1022        sync                            /* disable interrupts so SRR0/1 */
1023        mtmsrd  r0                      /* don't get trashed */
1024
1025        LOAD_REG_ADDR(r4, rtas)
1026        ld      r5,RTASENTRY(r4)        /* get the rtas->entry value */
1027        ld      r4,RTASBASE(r4)         /* get the rtas->base value */
1028        
1029        mtspr   SPRN_SRR0,r5
1030        mtspr   SPRN_SRR1,r6
1031        rfid
1032        b       .       /* prevent speculative execution */
1033
1034_STATIC(rtas_return_loc)
1035        /* relocation is off at this point */
1036        GET_PACA(r4)
1037        clrldi  r4,r4,2                 /* convert to realmode address */
1038
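        /* Get the address of label 0 below into LR position-independently
         * (we are executing at a real-mode address here), then load the
         * virtual address of rtas_restore_regs from the literal at 1f.
         */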
1039        bcl     20,31,$+4
10400:      mflr    r3
1041        ld      r3,(1f-0b)(r3)          /* get &.rtas_restore_regs */
1042
1043        mfmsr   r6
1044        li      r0,MSR_RI
1045        andc    r6,r6,r0
1046        sync    
1047        mtmsrd  r6
1048        
1049        ld      r1,PACAR1(r4)           /* Restore our SP */
1050        ld      r4,PACASAVEDMSR(r4)     /* Restore our MSR */
1051
1052        mtspr   SPRN_SRR0,r3
1053        mtspr   SPRN_SRR1,r4
1054        rfid
1055        b       .       /* prevent speculative execution */
1056
1057        .align  3
10581:      .llong  .rtas_restore_regs
1059
1060_STATIC(rtas_restore_regs)
1061        /* relocation is on at this point */
1062        REST_GPR(2, r1)                 /* Restore the TOC */
1063        REST_GPR(13, r1)                /* Restore paca */
1064        REST_8GPRS(14, r1)              /* Restore the non-volatiles */
1065        REST_10GPRS(22, r1)             /* ditto */
1066
1067        GET_PACA(r13)
1068
1069        ld      r4,_CCR(r1)
1070        mtcr    r4
1071        ld      r5,_CTR(r1)
1072        mtctr   r5
1073        ld      r6,_XER(r1)
1074        mtspr   SPRN_XER,r6
1075        ld      r7,_DAR(r1)
1076        mtdar   r7
1077        ld      r8,_DSISR(r1)
1078        mtdsisr r8
1079
1080        addi    r1,r1,RTAS_FRAME_SIZE   /* Unstack our frame */
1081        ld      r0,16(r1)               /* get return address */
1082
1083        mtlr    r0
1084        blr                             /* return to caller */
1085
1086#endif /* CONFIG_PPC_RTAS */
1087
1088_GLOBAL(enter_prom)
1089        mflr    r0
1090        std     r0,16(r1)
1091        stdu    r1,-PROM_FRAME_SIZE(r1) /* Save SP and create stack space */
1092
1093        /* Because PROM is running in 32b mode, it clobbers the high order half
1094         * of all registers that it saves.  We therefore save those registers
1095         * PROM might touch to the stack.  (r0, r3-r13 are caller saved)
1096         */
1097        SAVE_GPR(2, r1)
1098        SAVE_GPR(13, r1)
1099        SAVE_8GPRS(14, r1)
1100        SAVE_10GPRS(22, r1)
1101        mfcr    r10
1102        mfmsr   r11
1103        std     r10,_CCR(r1)
1104        std     r11,_MSR(r1)
1105
1106        /* Get the PROM entrypoint */
1107        mtlr    r4
1108
 1109        /* Switch the MSR to 32-bit mode
 1110         */
1111#ifdef CONFIG_PPC_BOOK3E
1112        rlwinm  r11,r11,0,1,31
1113        mtmsr   r11
1114#else /* CONFIG_PPC_BOOK3E */
1115        mfmsr   r11
1116        li      r12,1
1117        rldicr  r12,r12,MSR_SF_LG,(63-MSR_SF_LG)
1118        andc    r11,r11,r12
1119        li      r12,1
1120        rldicr  r12,r12,MSR_ISF_LG,(63-MSR_ISF_LG)
1121        andc    r11,r11,r12
1122        mtmsrd  r11
1123#endif /* CONFIG_PPC_BOOK3E */
1124        isync
1125
1126        /* Enter PROM here... */
1127        blrl
1128
 1129        /* Just make sure that the top 32 bits of r1 didn't get
 1130         * corrupted by OF
1131         */
1132        rldicl  r1,r1,0,32
1133
1134        /* Restore the MSR (back to 64 bits) */
1135        ld      r0,_MSR(r1)
1136        MTMSRD(r0)
1137        isync
1138
1139        /* Restore other registers */
1140        REST_GPR(2, r1)
1141        REST_GPR(13, r1)
1142        REST_8GPRS(14, r1)
1143        REST_10GPRS(22, r1)
1144        ld      r4,_CCR(r1)
1145        mtcr    r4
1146        
1147        addi    r1,r1,PROM_FRAME_SIZE
1148        ld      r0,16(r1)
1149        mtlr    r0
1150        blr
1151
1152#ifdef CONFIG_FUNCTION_TRACER
1153#ifdef CONFIG_DYNAMIC_FTRACE
1154_GLOBAL(mcount)
1155_GLOBAL(_mcount)
1156        blr
1157
1158_GLOBAL(ftrace_caller)
1159        /* Taken from output of objdump from lib64/glibc */
1160        mflr    r3
1161        ld      r11, 0(r1)
1162        stdu    r1, -112(r1)
1163        std     r3, 128(r1)
1164        ld      r4, 16(r11)
1165        subi    r3, r3, MCOUNT_INSN_SIZE
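        /* r3 now holds the address of the mcount call site in the traced
         * function and r4 the caller's return address (the "parent ip"),
         * which are the two arguments the tracer callback expects.
         */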
1166.globl ftrace_call
1167ftrace_call:
1168        bl      ftrace_stub
1169        nop
1170#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1171.globl ftrace_graph_call
1172ftrace_graph_call:
1173        b       ftrace_graph_stub
1174_GLOBAL(ftrace_graph_stub)
1175#endif
1176        ld      r0, 128(r1)
1177        mtlr    r0
1178        addi    r1, r1, 112
1179_GLOBAL(ftrace_stub)
1180        blr
1181#else
1182_GLOBAL(mcount)
1183        blr
1184
1185_GLOBAL(_mcount)
1186        /* Taken from output of objdump from lib64/glibc */
1187        mflr    r3
1188        ld      r11, 0(r1)
1189        stdu    r1, -112(r1)
1190        std     r3, 128(r1)
1191        ld      r4, 16(r11)
1192
1193        subi    r3, r3, MCOUNT_INSN_SIZE
1194        LOAD_REG_ADDR(r5,ftrace_trace_function)
1195        ld      r5,0(r5)
1196        ld      r5,0(r5)
1197        mtctr   r5
1198        bctrl
1199        nop
1200
1201
1202#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1203        b       ftrace_graph_caller
1204#endif
1205        ld      r0, 128(r1)
1206        mtlr    r0
1207        addi    r1, r1, 112
1208_GLOBAL(ftrace_stub)
1209        blr
1210
1211#endif /* CONFIG_DYNAMIC_FTRACE */
1212
1213#ifdef CONFIG_FUNCTION_GRAPH_TRACER
1214_GLOBAL(ftrace_graph_caller)
1215        /* load r4 with local address */
1216        ld      r4, 128(r1)
1217        subi    r4, r4, MCOUNT_INSN_SIZE
1218
1219        /* get the parent address */
1220        ld      r11, 112(r1)
1221        addi    r3, r11, 16
1222
1223        bl      .prepare_ftrace_return
1224        nop
1225
1226        ld      r0, 128(r1)
1227        mtlr    r0
1228        addi    r1, r1, 112
1229        blr
1230
1231_GLOBAL(return_to_handler)
1232        /* need to save return values */
1233        std     r4,  -24(r1)
1234        std     r3,  -16(r1)
1235        std     r31, -8(r1)
1236        mr      r31, r1
1237        stdu    r1, -112(r1)
1238
1239        bl      .ftrace_return_to_handler
1240        nop
1241
1242        /* return value has real return address */
1243        mtlr    r3
1244
1245        ld      r1, 0(r1)
1246        ld      r4,  -24(r1)
1247        ld      r3,  -16(r1)
1248        ld      r31, -8(r1)
1249
1250        /* Jump back to real return address */
1251        blr
1252
1253_GLOBAL(mod_return_to_handler)
1254        /* need to save return values */
1255        std     r4,  -32(r1)
1256        std     r3,  -24(r1)
1257        /* save TOC */
1258        std     r2,  -16(r1)
1259        std     r31, -8(r1)
1260        mr      r31, r1
1261        stdu    r1, -112(r1)
1262
1263        /*
1264         * We are in a module using the module's TOC.
1265         * Switch to our TOC to run inside the core kernel.
1266         */
1267        ld      r2, PACATOC(r13)
1268
1269        bl      .ftrace_return_to_handler
1270        nop
1271
1272        /* return value has real return address */
1273        mtlr    r3
1274
1275        ld      r1, 0(r1)
1276        ld      r4,  -32(r1)
1277        ld      r3,  -24(r1)
1278        ld      r2,  -16(r1)
1279        ld      r31, -8(r1)
1280
1281        /* Jump back to real return address */
1282        blr
1283#endif /* CONFIG_FUNCTION_GRAPH_TRACER */
1284#endif /* CONFIG_FUNCTION_TRACER */
1285