/* arch/powerpc/kernel/exceptions-64s.S */
/*
 * This file contains the 64-bit "server" PowerPC variant
 * of the low level exception handling including exception
 * vectors, exception return, part of the slb and stab
 * handling and other fixed offset specific things.
 *
 * This file is meant to be #included from head_64.S due to
 * position dependent assembly.
 *
 * Most of this originates from head_64.S and thus has the same
 * copyright history.
 *
 */
  14
  15#include <asm/hw_irq.h>
  16#include <asm/exception-64s.h>
  17#include <asm/ptrace.h>
  18#include <asm/cpuidle.h>
  19
  20/*
  21 * We layout physical memory as follows:
  22 * 0x0000 - 0x00ff : Secondary processor spin code
  23 * 0x0100 - 0x17ff : pSeries Interrupt prologs
  24 * 0x1800 - 0x4000 : interrupt support common interrupt prologs
  25 * 0x4000 - 0x5fff : pSeries interrupts with IR=1,DR=1
  26 * 0x6000 - 0x6fff : more interrupt support including for IR=1,DR=1
  27 * 0x7000 - 0x7fff : FWNMI data area
  28 * 0x8000 - 0x8fff : Initial (CPU0) segment table
  29 * 0x9000 -        : Early init and support code
  30 */
  31        /* Syscall routine is used twice, in reloc-off and reloc-on paths */
  32#define SYSCALL_PSERIES_1                                       \
  33BEGIN_FTR_SECTION                                               \
  34        cmpdi   r0,0x1ebe ;                                     \
  35        beq-    1f ;                                            \
  36END_FTR_SECTION_IFSET(CPU_FTR_REAL_LE)                          \
  37        mr      r9,r13 ;                                        \
  38        GET_PACA(r13) ;                                         \
  39        mfspr   r11,SPRN_SRR0 ;                                 \
  400:
  41
  42#define SYSCALL_PSERIES_2_RFID                                  \
  43        mfspr   r12,SPRN_SRR1 ;                                 \
  44        ld      r10,PACAKBASE(r13) ;                            \
  45        LOAD_HANDLER(r10, system_call_entry) ;                  \
  46        mtspr   SPRN_SRR0,r10 ;                                 \
  47        ld      r10,PACAKMSR(r13) ;                             \
  48        mtspr   SPRN_SRR1,r10 ;                                 \
  49        rfid ;                                                  \
  50        b       . ;     /* prevent speculative execution */
  51
  52#define SYSCALL_PSERIES_3                                       \
  53        /* Fast LE/BE switch system call */                     \
  541:      mfspr   r12,SPRN_SRR1 ;                                 \
  55        xori    r12,r12,MSR_LE ;                                \
  56        mtspr   SPRN_SRR1,r12 ;                                 \
  57        rfid ;          /* return to userspace */               \
  58        b       . ;     /* prevent speculative execution */
  59
  60#if defined(CONFIG_RELOCATABLE)
  61        /*
  62         * We can't branch directly; in the direct case we use LR
  63         * and system_call_entry restores LR.  (We thus need to move
  64         * LR to r10 in the RFID case too.)
  65         */
  66#define SYSCALL_PSERIES_2_DIRECT                                \
  67        mflr    r10 ;                                           \
  68        ld      r12,PACAKBASE(r13) ;                            \
  69        LOAD_HANDLER(r12, system_call_entry_direct) ;           \
  70        mtctr   r12 ;                                           \
  71        mfspr   r12,SPRN_SRR1 ;                                 \
  72        /* Re-use of r13... No spare regs to do this */ \
  73        li      r13,MSR_RI ;                                    \
  74        mtmsrd  r13,1 ;                                         \
  75        GET_PACA(r13) ; /* get r13 back */                      \
  76        bctr ;
  77#else
  78        /* We can branch directly */
  79#define SYSCALL_PSERIES_2_DIRECT                                \
  80        mfspr   r12,SPRN_SRR1 ;                                 \
  81        li      r10,MSR_RI ;                                    \
  82        mtmsrd  r10,1 ;                 /* Set RI (EE=0) */     \
  83        b       system_call_entry_direct ;
  84#endif
  85
  86/*
  87 * This is the start of the interrupt handlers for pSeries
  88 * This code runs with relocation off.
  89 * Code from here to __end_interrupts gets copied down to real
  90 * address 0x100 when we are running a relocatable kernel.
  91 * Therefore any relative branches in this section must only
  92 * branch to labels in this section.
  93 */
  94        . = 0x100
  95        .globl __start_interrupts
  96__start_interrupts:
  97
  98        .globl system_reset_pSeries;
  99system_reset_pSeries:
 100        HMT_MEDIUM_PPR_DISCARD
 101        SET_SCRATCH0(r13)
 102#ifdef CONFIG_PPC_P7_NAP
 103BEGIN_FTR_SECTION
 104        /* Running native on arch 2.06 or later, check if we are
 105         * waking up from nap/sleep/winkle.
 106         */
 107        mfspr   r13,SPRN_SRR1
 108        rlwinm. r13,r13,47-31,30,31
 109        beq     9f
 110
 111        cmpwi   cr3,r13,2
 112
 113        /*
 114         * Check if last bit of HSPGR0 is set. This indicates whether we are
 115         * waking up from winkle.
 116         */
 117        GET_PACA(r13)
 118        clrldi  r5,r13,63
 119        clrrdi  r13,r13,1
 120        cmpwi   cr4,r5,1
 121        mtspr   SPRN_HSPRG0,r13
 122
 123        lbz     r0,PACA_THREAD_IDLE_STATE(r13)
 124        cmpwi   cr2,r0,PNV_THREAD_NAP
 125        bgt     cr2,8f                          /* Either sleep or Winkle */
 126
 127        /* Waking up from nap should not cause hypervisor state loss */
 128        bgt     cr3,.
 129
 130        /* Waking up from nap */
 131        li      r0,PNV_THREAD_RUNNING
 132        stb     r0,PACA_THREAD_IDLE_STATE(r13)  /* Clear thread state */
 133
 134#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
 135        li      r0,KVM_HWTHREAD_IN_KERNEL
 136        stb     r0,HSTATE_HWTHREAD_STATE(r13)
 137        /* Order setting hwthread_state vs. testing hwthread_req */
 138        sync
 139        lbz     r0,HSTATE_HWTHREAD_REQ(r13)
 140        cmpwi   r0,0
 141        beq     1f
 142        b       kvm_start_guest
 1431:
 144#endif
 145
 146        /* Return SRR1 from power7_nap() */
 147        mfspr   r3,SPRN_SRR1
 148        beq     cr3,2f
 149        b       power7_wakeup_noloss
 1502:      b       power7_wakeup_loss
 151
 152        /* Fast Sleep wakeup on PowerNV */
 1538:      GET_PACA(r13)
 154        b       power7_wakeup_tb_loss
 155
 1569:
 157END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 158#endif /* CONFIG_PPC_P7_NAP */
 159        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
 160                                 NOTEST, 0x100)
 161
 162        . = 0x200
 163machine_check_pSeries_1:
 164        /* This is moved out of line as it can be patched by FW, but
 165         * some code path might still want to branch into the original
 166         * vector
 167         */
 168        HMT_MEDIUM_PPR_DISCARD
 169        SET_SCRATCH0(r13)               /* save r13 */
 170#ifdef CONFIG_PPC_P7_NAP
 171BEGIN_FTR_SECTION
 172        /* Running native on arch 2.06 or later, check if we are
 173         * waking up from nap. We only handle no state loss and
 174         * supervisor state loss. We do -not- handle hypervisor
 175         * state loss at this time.
 176         */
 177        mfspr   r13,SPRN_SRR1
 178        rlwinm. r13,r13,47-31,30,31
 179        OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
 180        beq     9f
 181
 182        mfspr   r13,SPRN_SRR1
 183        rlwinm. r13,r13,47-31,30,31
 184        /* waking up from powersave (nap) state */
 185        cmpwi   cr1,r13,2
 186        /* Total loss of HV state is fatal. let's just stay stuck here */
 187        OPT_GET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
 188        bgt     cr1,.
 1899:
 190        OPT_SET_SPR(r13, SPRN_CFAR, CPU_FTR_CFAR)
 191END_FTR_SECTION_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 192#endif /* CONFIG_PPC_P7_NAP */
 193        EXCEPTION_PROLOG_0(PACA_EXMC)
 194BEGIN_FTR_SECTION
 195        b       machine_check_pSeries_early
 196FTR_SECTION_ELSE
 197        b       machine_check_pSeries_0
 198ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 199
 200        . = 0x300
 201        .globl data_access_pSeries
 202data_access_pSeries:
 203        HMT_MEDIUM_PPR_DISCARD
 204        SET_SCRATCH0(r13)
 205BEGIN_FTR_SECTION
 206        b       data_access_check_stab
 207data_access_not_stab:
 208END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
 209        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, data_access_common, EXC_STD,
 210                                 KVMTEST, 0x300)
 211
 212        . = 0x380
 213        .globl data_access_slb_pSeries
 214data_access_slb_pSeries:
 215        HMT_MEDIUM_PPR_DISCARD
 216        SET_SCRATCH0(r13)
 217        EXCEPTION_PROLOG_0(PACA_EXSLB)
 218        EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST, 0x380)
 219        std     r3,PACA_EXSLB+EX_R3(r13)
 220        mfspr   r3,SPRN_DAR
 221#ifdef __DISABLED__
 222        /* Keep that around for when we re-implement dynamic VSIDs */
 223        cmpdi   r3,0
 224        bge     slb_miss_user_pseries
 225#endif /* __DISABLED__ */
 226        mfspr   r12,SPRN_SRR1
 227#ifndef CONFIG_RELOCATABLE
 228        b       slb_miss_realmode
 229#else
 230        /*
 231         * We can't just use a direct branch to slb_miss_realmode
 232         * because the distance from here to there depends on where
 233         * the kernel ends up being put.
 234         */
 235        mfctr   r11
 236        ld      r10,PACAKBASE(r13)
 237        LOAD_HANDLER(r10, slb_miss_realmode)
 238        mtctr   r10
 239        bctr
 240#endif
 241
 242        STD_EXCEPTION_PSERIES(0x400, 0x400, instruction_access)
 243
 244        . = 0x480
 245        .globl instruction_access_slb_pSeries
 246instruction_access_slb_pSeries:
 247        HMT_MEDIUM_PPR_DISCARD
 248        SET_SCRATCH0(r13)
 249        EXCEPTION_PROLOG_0(PACA_EXSLB)
 250        EXCEPTION_PROLOG_1(PACA_EXSLB, KVMTEST_PR, 0x480)
 251        std     r3,PACA_EXSLB+EX_R3(r13)
 252        mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
 253#ifdef __DISABLED__
 254        /* Keep that around for when we re-implement dynamic VSIDs */
 255        cmpdi   r3,0
 256        bge     slb_miss_user_pseries
 257#endif /* __DISABLED__ */
 258        mfspr   r12,SPRN_SRR1
 259#ifndef CONFIG_RELOCATABLE
 260        b       slb_miss_realmode
 261#else
 262        mfctr   r11
 263        ld      r10,PACAKBASE(r13)
 264        LOAD_HANDLER(r10, slb_miss_realmode)
 265        mtctr   r10
 266        bctr
 267#endif
 268
 269        /* We open code these as we can't have a ". = x" (even with
 270         * x = "." within a feature section
 271         */
 272        . = 0x500;
 273        .globl hardware_interrupt_pSeries;
 274        .globl hardware_interrupt_hv;
 275hardware_interrupt_pSeries:
 276hardware_interrupt_hv:
 277        HMT_MEDIUM_PPR_DISCARD
 278        BEGIN_FTR_SECTION
 279                _MASKABLE_EXCEPTION_PSERIES(0x502, hardware_interrupt,
 280                                            EXC_HV, SOFTEN_TEST_HV)
 281                KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x502)
 282        FTR_SECTION_ELSE
 283                _MASKABLE_EXCEPTION_PSERIES(0x500, hardware_interrupt,
 284                                            EXC_STD, SOFTEN_TEST_HV_201)
 285                KVM_HANDLER(PACA_EXGEN, EXC_STD, 0x500)
 286        ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE | CPU_FTR_ARCH_206)
 287
 288        STD_EXCEPTION_PSERIES(0x600, 0x600, alignment)
 289        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x600)
 290
 291        STD_EXCEPTION_PSERIES(0x700, 0x700, program_check)
 292        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x700)
 293
 294        STD_EXCEPTION_PSERIES(0x800, 0x800, fp_unavailable)
 295        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x800)
 296
 297        . = 0x900
 298        .globl decrementer_pSeries
 299decrementer_pSeries:
 300        _MASKABLE_EXCEPTION_PSERIES(0x900, decrementer, EXC_STD, SOFTEN_TEST_PR)
 301
 302        STD_EXCEPTION_HV(0x980, 0x982, hdecrementer)
 303
 304        MASKABLE_EXCEPTION_PSERIES(0xa00, 0xa00, doorbell_super)
 305        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xa00)
 306
 307        STD_EXCEPTION_PSERIES(0xb00, 0xb00, trap_0b)
 308        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xb00)
 309
 310        . = 0xc00
 311        .globl  system_call_pSeries
 312system_call_pSeries:
 313        HMT_MEDIUM
 314#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
 315        SET_SCRATCH0(r13)
 316        GET_PACA(r13)
 317        std     r9,PACA_EXGEN+EX_R9(r13)
 318        std     r10,PACA_EXGEN+EX_R10(r13)
 319        mfcr    r9
 320        KVMTEST(0xc00)
 321        GET_SCRATCH0(r13)
 322#endif
 323        SYSCALL_PSERIES_1
 324        SYSCALL_PSERIES_2_RFID
 325        SYSCALL_PSERIES_3
 326        KVM_HANDLER(PACA_EXGEN, EXC_STD, 0xc00)
 327
 328        STD_EXCEPTION_PSERIES(0xd00, 0xd00, single_step)
 329        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xd00)
 330
 331        /* At 0xe??? we have a bunch of hypervisor exceptions, we branch
 332         * out of line to handle them
 333         */
 334        . = 0xe00
 335hv_data_storage_trampoline:
 336        SET_SCRATCH0(r13)
 337        EXCEPTION_PROLOG_0(PACA_EXGEN)
 338        b       h_data_storage_hv
 339
 340        . = 0xe20
 341hv_instr_storage_trampoline:
 342        SET_SCRATCH0(r13)
 343        EXCEPTION_PROLOG_0(PACA_EXGEN)
 344        b       h_instr_storage_hv
 345
 346        . = 0xe40
 347emulation_assist_trampoline:
 348        SET_SCRATCH0(r13)
 349        EXCEPTION_PROLOG_0(PACA_EXGEN)
 350        b       emulation_assist_hv
 351
 352        . = 0xe60
 353hv_exception_trampoline:
 354        SET_SCRATCH0(r13)
 355        EXCEPTION_PROLOG_0(PACA_EXGEN)
 356        b       hmi_exception_early
 357
 358        . = 0xe80
 359hv_doorbell_trampoline:
 360        SET_SCRATCH0(r13)
 361        EXCEPTION_PROLOG_0(PACA_EXGEN)
 362        b       h_doorbell_hv
 363
 364        /* We need to deal with the Altivec unavailable exception
 365         * here which is at 0xf20, thus in the middle of the
 366         * prolog code of the PerformanceMonitor one. A little
 367         * trickery is thus necessary
 368         */
 369        . = 0xf00
 370performance_monitor_pseries_trampoline:
 371        SET_SCRATCH0(r13)
 372        EXCEPTION_PROLOG_0(PACA_EXGEN)
 373        b       performance_monitor_pSeries
 374
 375        . = 0xf20
 376altivec_unavailable_pseries_trampoline:
 377        SET_SCRATCH0(r13)
 378        EXCEPTION_PROLOG_0(PACA_EXGEN)
 379        b       altivec_unavailable_pSeries
 380
 381        . = 0xf40
 382vsx_unavailable_pseries_trampoline:
 383        SET_SCRATCH0(r13)
 384        EXCEPTION_PROLOG_0(PACA_EXGEN)
 385        b       vsx_unavailable_pSeries
 386
 387        . = 0xf60
 388facility_unavailable_trampoline:
 389        SET_SCRATCH0(r13)
 390        EXCEPTION_PROLOG_0(PACA_EXGEN)
 391        b       facility_unavailable_pSeries
 392
 393        . = 0xf80
 394hv_facility_unavailable_trampoline:
 395        SET_SCRATCH0(r13)
 396        EXCEPTION_PROLOG_0(PACA_EXGEN)
 397        b       facility_unavailable_hv
 398
 399#ifdef CONFIG_CBE_RAS
 400        STD_EXCEPTION_HV(0x1200, 0x1202, cbe_system_error)
 401        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1202)
 402#endif /* CONFIG_CBE_RAS */
 403
 404        STD_EXCEPTION_PSERIES(0x1300, 0x1300, instruction_breakpoint)
 405        KVM_HANDLER_PR_SKIP(PACA_EXGEN, EXC_STD, 0x1300)
 406
 407        . = 0x1500
 408        .global denorm_exception_hv
 409denorm_exception_hv:
 410        HMT_MEDIUM_PPR_DISCARD
 411        mtspr   SPRN_SPRG_HSCRATCH0,r13
 412        EXCEPTION_PROLOG_0(PACA_EXGEN)
 413        EXCEPTION_PROLOG_1(PACA_EXGEN, NOTEST, 0x1500)
 414
 415#ifdef CONFIG_PPC_DENORMALISATION
 416        mfspr   r10,SPRN_HSRR1
 417        mfspr   r11,SPRN_HSRR0          /* save HSRR0 */
 418        andis.  r10,r10,(HSRR1_DENORM)@h /* denorm? */
 419        addi    r11,r11,-4              /* HSRR0 is next instruction */
 420        bne+    denorm_assist
 421#endif
 422
 423        KVMTEST(0x1500)
 424        EXCEPTION_PROLOG_PSERIES_1(denorm_common, EXC_HV)
 425        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x1500)
 426
 427#ifdef CONFIG_CBE_RAS
 428        STD_EXCEPTION_HV(0x1600, 0x1602, cbe_maintenance)
 429        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1602)
 430#endif /* CONFIG_CBE_RAS */
 431
 432        STD_EXCEPTION_PSERIES(0x1700, 0x1700, altivec_assist)
 433        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x1700)
 434
 435#ifdef CONFIG_CBE_RAS
 436        STD_EXCEPTION_HV(0x1800, 0x1802, cbe_thermal)
 437        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0x1802)
 438#else
 439        . = 0x1800
 440#endif /* CONFIG_CBE_RAS */
 441
 442
 443/*** Out of line interrupts support ***/
 444
 445        .align  7
 446        /* moved from 0x200 */
 447machine_check_pSeries_early:
 448BEGIN_FTR_SECTION
 449        EXCEPTION_PROLOG_1(PACA_EXMC, NOTEST, 0x200)
 450        /*
 451         * Register contents:
 452         * R13          = PACA
 453         * R9           = CR
 454         * Original R9 to R13 is saved on PACA_EXMC
 455         *
 456         * Switch to mc_emergency stack and handle re-entrancy (we limit
 457         * the nested MCE upto level 4 to avoid stack overflow).
 458         * Save MCE registers srr1, srr0, dar and dsisr and then set ME=1
 459         *
 460         * We use paca->in_mce to check whether this is the first entry or
 461         * nested machine check. We increment paca->in_mce to track nested
 462         * machine checks.
 463         *
 464         * If this is the first entry then set stack pointer to
 465         * paca->mc_emergency_sp, otherwise r1 is already pointing to
 466         * stack frame on mc_emergency stack.
 467         *
 468         * NOTE: We are here with MSR_ME=0 (off), which means we risk a
 469         * checkstop if we get another machine check exception before we do
 470         * rfid with MSR_ME=1.
 471         */
 472        mr      r11,r1                  /* Save r1 */
 473        lhz     r10,PACA_IN_MCE(r13)
 474        cmpwi   r10,0                   /* Are we in nested machine check */
 475        bne     0f                      /* Yes, we are. */
 476        /* First machine check entry */
 477        ld      r1,PACAMCEMERGSP(r13)   /* Use MC emergency stack */
 4780:      subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame */
 479        addi    r10,r10,1               /* increment paca->in_mce */
 480        sth     r10,PACA_IN_MCE(r13)
 481        /* Limit nested MCE to level 4 to avoid stack overflow */
 482        cmpwi   r10,4
 483        bgt     2f                      /* Check if we hit limit of 4 */
 484        std     r11,GPR1(r1)            /* Save r1 on the stack. */
 485        std     r11,0(r1)               /* make stack chain pointer */
 486        mfspr   r11,SPRN_SRR0           /* Save SRR0 */
 487        std     r11,_NIP(r1)
 488        mfspr   r11,SPRN_SRR1           /* Save SRR1 */
 489        std     r11,_MSR(r1)
 490        mfspr   r11,SPRN_DAR            /* Save DAR */
 491        std     r11,_DAR(r1)
 492        mfspr   r11,SPRN_DSISR          /* Save DSISR */
 493        std     r11,_DSISR(r1)
 494        std     r9,_CCR(r1)             /* Save CR in stackframe */
 495        /* Save r9 through r13 from EXMC save area to stack frame. */
 496        EXCEPTION_PROLOG_COMMON_2(PACA_EXMC)
 497        mfmsr   r11                     /* get MSR value */
 498        ori     r11,r11,MSR_ME          /* turn on ME bit */
 499        ori     r11,r11,MSR_RI          /* turn on RI bit */
 500        ld      r12,PACAKBASE(r13)      /* get high part of &label */
 501        LOAD_HANDLER(r12, machine_check_handle_early)
 5021:      mtspr   SPRN_SRR0,r12
 503        mtspr   SPRN_SRR1,r11
 504        rfid
 505        b       .       /* prevent speculative execution */
 5062:
 507        /* Stack overflow. Stay on emergency stack and panic.
 508         * Keep the ME bit off while panic-ing, so that if we hit
 509         * another machine check we checkstop.
 510         */
 511        addi    r1,r1,INT_FRAME_SIZE    /* go back to previous stack frame */
 512        ld      r11,PACAKMSR(r13)
 513        ld      r12,PACAKBASE(r13)
 514        LOAD_HANDLER(r12, unrecover_mce)
 515        li      r10,MSR_ME
 516        andc    r11,r11,r10             /* Turn off MSR_ME */
 517        b       1b
 518        b       .       /* prevent speculative execution */
 519END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
 520
 521machine_check_pSeries:
 522        .globl machine_check_fwnmi
 523machine_check_fwnmi:
 524        HMT_MEDIUM_PPR_DISCARD
 525        SET_SCRATCH0(r13)               /* save r13 */
 526        EXCEPTION_PROLOG_0(PACA_EXMC)
 527machine_check_pSeries_0:
 528        EXCEPTION_PROLOG_1(PACA_EXMC, KVMTEST, 0x200)
 529        EXCEPTION_PROLOG_PSERIES_1(machine_check_common, EXC_STD)
 530        KVM_HANDLER_SKIP(PACA_EXMC, EXC_STD, 0x200)
 531
 532        /* moved from 0x300 */
 533data_access_check_stab:
 534        GET_PACA(r13)
 535        std     r9,PACA_EXSLB+EX_R9(r13)
 536        std     r10,PACA_EXSLB+EX_R10(r13)
 537        mfspr   r10,SPRN_DAR
 538        mfspr   r9,SPRN_DSISR
 539        srdi    r10,r10,60
 540        rlwimi  r10,r9,16,0x20
 541#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
 542        lbz     r9,HSTATE_IN_GUEST(r13)
 543        rlwimi  r10,r9,8,0x300
 544#endif
 545        mfcr    r9
 546        cmpwi   r10,0x2c
 547        beq     do_stab_bolted_pSeries
 548        mtcrf   0x80,r9
 549        ld      r9,PACA_EXSLB+EX_R9(r13)
 550        ld      r10,PACA_EXSLB+EX_R10(r13)
 551        b       data_access_not_stab
 552do_stab_bolted_pSeries:
 553        std     r11,PACA_EXSLB+EX_R11(r13)
 554        std     r12,PACA_EXSLB+EX_R12(r13)
 555        GET_SCRATCH0(r10)
 556        std     r10,PACA_EXSLB+EX_R13(r13)
 557        EXCEPTION_PROLOG_PSERIES_1(do_stab_bolted, EXC_STD)
 558
 559        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_STD, 0x300)
 560        KVM_HANDLER_SKIP(PACA_EXSLB, EXC_STD, 0x380)
 561        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x400)
 562        KVM_HANDLER_PR(PACA_EXSLB, EXC_STD, 0x480)
 563        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0x900)
 564        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0x982)
 565
 566#ifdef CONFIG_PPC_DENORMALISATION
 567denorm_assist:
 568BEGIN_FTR_SECTION
 569/*
 570 * To denormalise we need to move a copy of the register to itself.
 571 * For POWER6 do that here for all FP regs.
 572 */
 573        mfmsr   r10
 574        ori     r10,r10,(MSR_FP|MSR_FE0|MSR_FE1)
 575        xori    r10,r10,(MSR_FE0|MSR_FE1)
 576        mtmsrd  r10
 577        sync
 578
 579#define FMR2(n)  fmr (n), (n) ; fmr n+1, n+1
 580#define FMR4(n)  FMR2(n) ; FMR2(n+2)
 581#define FMR8(n)  FMR4(n) ; FMR4(n+4)
 582#define FMR16(n) FMR8(n) ; FMR8(n+8)
 583#define FMR32(n) FMR16(n) ; FMR16(n+16)
 584        FMR32(0)
 585
 586FTR_SECTION_ELSE
 587/*
 588 * To denormalise we need to move a copy of the register to itself.
 589 * For POWER7 do that here for the first 32 VSX registers only.
 590 */
 591        mfmsr   r10
 592        oris    r10,r10,MSR_VSX@h
 593        mtmsrd  r10
 594        sync
 595
 596#define XVCPSGNDP2(n) XVCPSGNDP(n,n,n) ; XVCPSGNDP(n+1,n+1,n+1)
 597#define XVCPSGNDP4(n) XVCPSGNDP2(n) ; XVCPSGNDP2(n+2)
 598#define XVCPSGNDP8(n) XVCPSGNDP4(n) ; XVCPSGNDP4(n+4)
 599#define XVCPSGNDP16(n) XVCPSGNDP8(n) ; XVCPSGNDP8(n+8)
 600#define XVCPSGNDP32(n) XVCPSGNDP16(n) ; XVCPSGNDP16(n+16)
 601        XVCPSGNDP32(0)
 602
 603ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_206)
 604
 605BEGIN_FTR_SECTION
 606        b       denorm_done
 607END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
 608/*
 609 * To denormalise we need to move a copy of the register to itself.
 610 * For POWER8 we need to do that for all 64 VSX registers
 611 */
 612        XVCPSGNDP32(32)
 613denorm_done:
 614        mtspr   SPRN_HSRR0,r11
 615        mtcrf   0x80,r9
 616        ld      r9,PACA_EXGEN+EX_R9(r13)
 617        RESTORE_PPR_PACA(PACA_EXGEN, r10)
 618BEGIN_FTR_SECTION
 619        ld      r10,PACA_EXGEN+EX_CFAR(r13)
 620        mtspr   SPRN_CFAR,r10
 621END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
 622        ld      r10,PACA_EXGEN+EX_R10(r13)
 623        ld      r11,PACA_EXGEN+EX_R11(r13)
 624        ld      r12,PACA_EXGEN+EX_R12(r13)
 625        ld      r13,PACA_EXGEN+EX_R13(r13)
 626        HRFID
 627        b       .
 628#endif
 629
 630        .align  7
 631        /* moved from 0xe00 */
 632        STD_EXCEPTION_HV_OOL(0xe02, h_data_storage)
 633        KVM_HANDLER_SKIP(PACA_EXGEN, EXC_HV, 0xe02)
 634        STD_EXCEPTION_HV_OOL(0xe22, h_instr_storage)
 635        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe22)
 636        STD_EXCEPTION_HV_OOL(0xe42, emulation_assist)
 637        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe42)
 638        MASKABLE_EXCEPTION_HV_OOL(0xe62, hmi_exception)
 639        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe62)
 640
 641        .globl hmi_exception_early
 642hmi_exception_early:
 643        EXCEPTION_PROLOG_1(PACA_EXGEN, KVMTEST, 0xe62)
 644        mr      r10,r1                  /* Save r1                      */
 645        ld      r1,PACAEMERGSP(r13)     /* Use emergency stack          */
 646        subi    r1,r1,INT_FRAME_SIZE    /* alloc stack frame            */
 647        std     r9,_CCR(r1)             /* save CR in stackframe        */
 648        mfspr   r11,SPRN_HSRR0          /* Save HSRR0 */
 649        std     r11,_NIP(r1)            /* save HSRR0 in stackframe     */
 650        mfspr   r12,SPRN_HSRR1          /* Save SRR1 */
 651        std     r12,_MSR(r1)            /* save SRR1 in stackframe      */
 652        std     r10,0(r1)               /* make stack chain pointer     */
 653        std     r0,GPR0(r1)             /* save r0 in stackframe        */
 654        std     r10,GPR1(r1)            /* save r1 in stackframe        */
 655        EXCEPTION_PROLOG_COMMON_2(PACA_EXGEN)
 656        EXCEPTION_PROLOG_COMMON_3(0xe60)
 657        addi    r3,r1,STACK_FRAME_OVERHEAD
 658        bl      hmi_exception_realmode
 659        /* Windup the stack. */
 660        /* Move original HSRR0 and HSRR1 into the respective regs */
 661        ld      r9,_MSR(r1)
 662        mtspr   SPRN_HSRR1,r9
 663        ld      r3,_NIP(r1)
 664        mtspr   SPRN_HSRR0,r3
 665        ld      r9,_CTR(r1)
 666        mtctr   r9
 667        ld      r9,_XER(r1)
 668        mtxer   r9
 669        ld      r9,_LINK(r1)
 670        mtlr    r9
 671        REST_GPR(0, r1)
 672        REST_8GPRS(2, r1)
 673        REST_GPR(10, r1)
 674        ld      r11,_CCR(r1)
 675        mtcr    r11
 676        REST_GPR(11, r1)
 677        REST_2GPRS(12, r1)
 678        /* restore original r1. */
 679        ld      r1,GPR1(r1)
 680
 681        /*
 682         * Go to virtual mode and pull the HMI event information from
 683         * firmware.
 684         */
 685        .globl hmi_exception_after_realmode
 686hmi_exception_after_realmode:
 687        SET_SCRATCH0(r13)
 688        EXCEPTION_PROLOG_0(PACA_EXGEN)
 689        b       hmi_exception_hv
 690
 691        MASKABLE_EXCEPTION_HV_OOL(0xe82, h_doorbell)
 692        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xe82)
 693
 694        /* moved from 0xf00 */
 695        STD_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
 696        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf00)
 697        STD_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
 698        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf20)
 699        STD_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
 700        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf40)
 701        STD_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
 702        KVM_HANDLER_PR(PACA_EXGEN, EXC_STD, 0xf60)
 703        STD_EXCEPTION_HV_OOL(0xf82, facility_unavailable)
 704        KVM_HANDLER(PACA_EXGEN, EXC_HV, 0xf82)
 705
 706/*
 707 * An interrupt came in while soft-disabled. We set paca->irq_happened, then:
 708 * - If it was a decrementer interrupt, we bump the dec to max and and return.
 709 * - If it was a doorbell we return immediately since doorbells are edge
 710 *   triggered and won't automatically refire.
 711 * - If it was a HMI we return immediately since we handled it in realmode
 712 *   and it won't refire.
 713 * - else we hard disable and return.
 714 * This is called with r10 containing the value to OR to the paca field.
 715 */
 716#define MASKED_INTERRUPT(_H)                            \
 717masked_##_H##interrupt:                                 \
 718        std     r11,PACA_EXGEN+EX_R11(r13);             \
 719        lbz     r11,PACAIRQHAPPENED(r13);               \
 720        or      r11,r11,r10;                            \
 721        stb     r11,PACAIRQHAPPENED(r13);               \
 722        cmpwi   r10,PACA_IRQ_DEC;                       \
 723        bne     1f;                                     \
 724        lis     r10,0x7fff;                             \
 725        ori     r10,r10,0xffff;                         \
 726        mtspr   SPRN_DEC,r10;                           \
 727        b       2f;                                     \
 7281:      cmpwi   r10,PACA_IRQ_DBELL;                     \
 729        beq     2f;                                     \
 730        cmpwi   r10,PACA_IRQ_HMI;                       \
 731        beq     2f;                                     \
 732        mfspr   r10,SPRN_##_H##SRR1;                    \
 733        rldicl  r10,r10,48,1; /* clear MSR_EE */        \
 734        rotldi  r10,r10,16;                             \
 735        mtspr   SPRN_##_H##SRR1,r10;                    \
 7362:      mtcrf   0x80,r9;                                \
 737        ld      r9,PACA_EXGEN+EX_R9(r13);               \
 738        ld      r10,PACA_EXGEN+EX_R10(r13);             \
 739        ld      r11,PACA_EXGEN+EX_R11(r13);             \
 740        GET_SCRATCH0(r13);                              \
 741        ##_H##rfid;                                     \
 742        b       .
 743        
 744        MASKED_INTERRUPT()
 745        MASKED_INTERRUPT(H)
 746
 747/*
 748 * Called from arch_local_irq_enable when an interrupt needs
 749 * to be resent. r3 contains 0x500, 0x900, 0xa00 or 0xe80 to indicate
 750 * which kind of interrupt. MSR:EE is already off. We generate a
 751 * stackframe like if a real interrupt had happened.
 752 *
 753 * Note: While MSR:EE is off, we need to make sure that _MSR
 754 * in the generated frame has EE set to 1 or the exception
 755 * handler will not properly re-enable them.
 756 */
 757_GLOBAL(__replay_interrupt)
 758        /* We are going to jump to the exception common code which
 759         * will retrieve various register values from the PACA which
 760         * we don't give a damn about, so we don't bother storing them.
 761         */
 762        mfmsr   r12
 763        mflr    r11
 764        mfcr    r9
 765        ori     r12,r12,MSR_EE
 766        cmpwi   r3,0x900
 767        beq     decrementer_common
 768        cmpwi   r3,0x500
 769        beq     hardware_interrupt_common
 770BEGIN_FTR_SECTION
 771        cmpwi   r3,0xe80
 772        beq     h_doorbell_common
 773        cmpwi   r3,0xe60
 774        beq     hmi_exception_common
 775FTR_SECTION_ELSE
 776        cmpwi   r3,0xa00
 777        beq     doorbell_super_common
 778ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
 779        blr
 780
 781#ifdef CONFIG_PPC_PSERIES
 782/*
 783 * Vectors for the FWNMI option.  Share common code.
 784 */
 785        .globl system_reset_fwnmi
 786      .align 7
 787system_reset_fwnmi:
 788        HMT_MEDIUM_PPR_DISCARD
 789        SET_SCRATCH0(r13)               /* save r13 */
 790        EXCEPTION_PROLOG_PSERIES(PACA_EXGEN, system_reset_common, EXC_STD,
 791                                 NOTEST, 0x100)
 792
 793#endif /* CONFIG_PPC_PSERIES */
 794
#ifdef __DISABLED__
/*
 * This is used for when the SLB miss handler has to go virtual,
 * which doesn't happen for now anymore but will once we re-implement
 * dynamic VSIDs for shared page tables
 *
 * NOTE(review): this is compiled out. If it is ever re-enabled, the
 * bare SRR0/SRR1 operands below presumably need the SPRN_ prefix used
 * everywhere else in this file -- verify before enabling.
 */
slb_miss_user_pseries:
        /* Spill the remaining volatile regs into the EXGEN save area */
        std     r10,PACA_EXGEN+EX_R10(r13)
        std     r11,PACA_EXGEN+EX_R11(r13)
        std     r12,PACA_EXGEN+EX_R12(r13)
        GET_SCRATCH0(r10)
        /* Copy r9/r3 that the SLB prolog stashed in EXSLB over to EXGEN */
        ld      r11,PACA_EXSLB+EX_R9(r13)
        ld      r12,PACA_EXSLB+EX_R3(r13)
        std     r10,PACA_EXGEN+EX_R13(r13)
        std     r11,PACA_EXGEN+EX_R9(r13)
        std     r12,PACA_EXGEN+EX_R3(r13)
        clrrdi  r12,r13,32
        mfmsr   r10
        mfspr   r11,SRR0                        /* save SRR0 */
        ori     r12,r12,slb_miss_user_common@l  /* virt addr of handler */
        ori     r10,r10,MSR_IR|MSR_DR|MSR_RI
        mtspr   SRR0,r12
        mfspr   r12,SRR1                        /* and SRR1 */
        mtspr   SRR1,r10
        rfid                                    /* jump to handler with MMU on */
        b       .                               /* prevent spec. execution */
#endif /* __DISABLED__ */
 822
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
/* Skip the faulting instruction (advance SRR0 by 4) and return. */
kvmppc_skip_interrupt:
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
         */
        mfspr   r13, SPRN_SRR0
        addi    r13, r13, 4             /* step over one instruction */
        mtspr   SPRN_SRR0, r13
        GET_SCRATCH0(r13)               /* restore original r13 */
        rfid
        b       .

/* Same as above, but for hypervisor interrupts (HSRR0, hrfid). */
kvmppc_skip_Hinterrupt:
        /*
         * Here all GPRs are unchanged from when the interrupt happened
         * except for r13, which is saved in SPRG_SCRATCH0.
         */
        mfspr   r13, SPRN_HSRR0
        addi    r13, r13, 4             /* step over one instruction */
        mtspr   SPRN_HSRR0, r13
        GET_SCRATCH0(r13)               /* restore original r13 */
        hrfid
        b       .
#endif
 848
 849/*
 850 * Code from here down to __end_handlers is invoked from the
 851 * exception prologs above.  Because the prologs assemble the
 852 * addresses of these handlers using the LOAD_HANDLER macro,
 853 * which uses an ori instruction, these handlers must be in
 854 * the first 64k of the kernel image.
 855 */
 856
 857/*** Common interrupt handlers ***/
 858
 859        STD_EXCEPTION_COMMON(0x100, system_reset, system_reset_exception)
 860
 861        STD_EXCEPTION_COMMON_ASYNC(0x500, hardware_interrupt, do_IRQ)
 862        STD_EXCEPTION_COMMON_ASYNC(0x900, decrementer, timer_interrupt)
 863        STD_EXCEPTION_COMMON(0x980, hdecrementer, hdec_interrupt)
 864#ifdef CONFIG_PPC_DOORBELL
 865        STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, doorbell_exception)
 866#else
 867        STD_EXCEPTION_COMMON_ASYNC(0xa00, doorbell_super, unknown_exception)
 868#endif
 869        STD_EXCEPTION_COMMON(0xb00, trap_0b, unknown_exception)
 870        STD_EXCEPTION_COMMON(0xd00, single_step, single_step_exception)
 871        STD_EXCEPTION_COMMON(0xe00, trap_0e, unknown_exception)
 872        STD_EXCEPTION_COMMON(0xe40, emulation_assist, emulation_assist_interrupt)
 873        STD_EXCEPTION_COMMON_ASYNC(0xe60, hmi_exception, handle_hmi_exception)
 874#ifdef CONFIG_PPC_DOORBELL
 875        STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, doorbell_exception)
 876#else
 877        STD_EXCEPTION_COMMON_ASYNC(0xe80, h_doorbell, unknown_exception)
 878#endif
 879        STD_EXCEPTION_COMMON_ASYNC(0xf00, performance_monitor, performance_monitor_exception)
 880        STD_EXCEPTION_COMMON(0x1300, instruction_breakpoint, instruction_breakpoint_exception)
 881        STD_EXCEPTION_COMMON(0x1502, denorm, unknown_exception)
 882#ifdef CONFIG_ALTIVEC
 883        STD_EXCEPTION_COMMON(0x1700, altivec_assist, altivec_assist_exception)
 884#else
 885        STD_EXCEPTION_COMMON(0x1700, altivec_assist, unknown_exception)
 886#endif
 887#ifdef CONFIG_CBE_RAS
 888        STD_EXCEPTION_COMMON(0x1200, cbe_system_error, cbe_system_error_exception)
 889        STD_EXCEPTION_COMMON(0x1600, cbe_maintenance, cbe_maintenance_exception)
 890        STD_EXCEPTION_COMMON(0x1800, cbe_thermal, cbe_thermal_exception)
 891#endif /* CONFIG_CBE_RAS */
 892
 893        /*
 894         * Relocation-on interrupts: A subset of the interrupts can be delivered
 895         * with IR=1/DR=1, if AIL==2 and MSR.HV won't be changed by delivering
 896         * it.  Addresses are the same as the original interrupt addresses, but
 897         * offset by 0xc000000000004000.
 898         * It's impossible to receive interrupts below 0x300 via this mechanism.
 899         * KVM: None of these traps are from the guest ; anything that escalated
 900         * to HV=1 from HV=0 is delivered via real mode handlers.
 901         */
 902
 903        /*
 904         * This uses the standard macro, since the original 0x300 vector
 905         * only has extra guff for STAB-based processors -- which never
 906         * come here.
 907         */
 908        STD_RELON_EXCEPTION_PSERIES(0x4300, 0x300, data_access)
 909        . = 0x4380
 910        .globl data_access_slb_relon_pSeries
 911data_access_slb_relon_pSeries:
 912        SET_SCRATCH0(r13)
 913        EXCEPTION_PROLOG_0(PACA_EXSLB)
 914        EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x380)
 915        std     r3,PACA_EXSLB+EX_R3(r13)
 916        mfspr   r3,SPRN_DAR
 917        mfspr   r12,SPRN_SRR1
 918#ifndef CONFIG_RELOCATABLE
 919        b       slb_miss_realmode
 920#else
 921        /*
 922         * We can't just use a direct branch to slb_miss_realmode
 923         * because the distance from here to there depends on where
 924         * the kernel ends up being put.
 925         */
 926        mfctr   r11
 927        ld      r10,PACAKBASE(r13)
 928        LOAD_HANDLER(r10, slb_miss_realmode)
 929        mtctr   r10
 930        bctr
 931#endif
 932
        STD_RELON_EXCEPTION_PSERIES(0x4400, 0x400, instruction_access)
        . = 0x4480
        .globl instruction_access_slb_relon_pSeries
instruction_access_slb_relon_pSeries:
        /* Relocation-on instruction SLB miss: same shape as the data
         * variant above, but the faulting address comes from SRR0.
         */
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXSLB)
        EXCEPTION_PROLOG_1(PACA_EXSLB, NOTEST, 0x480)
        std     r3,PACA_EXSLB+EX_R3(r13)
        mfspr   r3,SPRN_SRR0            /* SRR0 is faulting address */
        mfspr   r12,SPRN_SRR1
#ifndef CONFIG_RELOCATABLE
        b       slb_miss_realmode
#else
        /* Indirect branch; see comment at data_access_slb_relon_pSeries */
        mfctr   r11
        ld      r10,PACAKBASE(r13)
        LOAD_HANDLER(r10, slb_miss_realmode)
        mtctr   r10
        bctr
#endif
 952
        . = 0x4500
        .globl hardware_interrupt_relon_pSeries;
        .globl hardware_interrupt_relon_hv;
hardware_interrupt_relon_pSeries:
hardware_interrupt_relon_hv:
        /* External interrupt: HV CPUs take it at 0x502 with HSRRs,
         * others at 0x500 with SRRs; both are soft-maskable.
         */
        BEGIN_FTR_SECTION
                _MASKABLE_RELON_EXCEPTION_PSERIES(0x502, hardware_interrupt, EXC_HV, SOFTEN_TEST_HV)
        FTR_SECTION_ELSE
                _MASKABLE_RELON_EXCEPTION_PSERIES(0x500, hardware_interrupt, EXC_STD, SOFTEN_TEST_PR)
        ALT_FTR_SECTION_END_IFSET(CPU_FTR_HVMODE)
        STD_RELON_EXCEPTION_PSERIES(0x4600, 0x600, alignment)
        STD_RELON_EXCEPTION_PSERIES(0x4700, 0x700, program_check)
        STD_RELON_EXCEPTION_PSERIES(0x4800, 0x800, fp_unavailable)
        MASKABLE_RELON_EXCEPTION_PSERIES(0x4900, 0x900, decrementer)
        STD_RELON_EXCEPTION_HV(0x4980, 0x982, hdecrementer)
        MASKABLE_RELON_EXCEPTION_PSERIES(0x4a00, 0xa00, doorbell_super)
        STD_RELON_EXCEPTION_PSERIES(0x4b00, 0xb00, trap_0b)

        . = 0x4c00
        .globl system_call_relon_pSeries
system_call_relon_pSeries:
        /* Relocation-on syscall entry; shares the SYSCALL_PSERIES_*
         * macros with the real-mode path, but can branch directly
         * (no rfid needed) since we are already relocated.
         */
        HMT_MEDIUM
        SYSCALL_PSERIES_1
        SYSCALL_PSERIES_2_DIRECT
        SYSCALL_PSERIES_3
 978
        STD_RELON_EXCEPTION_PSERIES(0x4d00, 0xd00, single_step)

        /* The 0xe00..0xf80 relon vectors are only 0x20 bytes apart, so
         * each holds a minimal trampoline to an out-of-line handler.
         */
        . = 0x4e00
        b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */

        . = 0x4e20
        b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */

        . = 0x4e40
emulation_assist_relon_trampoline:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        b       emulation_assist_relon_hv

        . = 0x4e60
        b       .       /* Can't happen, see v2.07 Book III-S section 6.5 */

        . = 0x4e80
h_doorbell_relon_trampoline:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        b       h_doorbell_relon_hv

        . = 0x4f00
performance_monitor_relon_pseries_trampoline:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        b       performance_monitor_relon_pSeries

        . = 0x4f20
altivec_unavailable_relon_pseries_trampoline:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        b       altivec_unavailable_relon_pSeries

        . = 0x4f40
vsx_unavailable_relon_pseries_trampoline:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        b       vsx_unavailable_relon_pSeries

        . = 0x4f60
facility_unavailable_relon_trampoline:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        b       facility_unavailable_relon_pSeries

        . = 0x4f80
hv_facility_unavailable_relon_trampoline:
        SET_SCRATCH0(r13)
        EXCEPTION_PROLOG_0(PACA_EXGEN)
        b       hv_facility_unavailable_relon_hv

        STD_RELON_EXCEPTION_PSERIES(0x5300, 0x1300, instruction_breakpoint)
#ifdef CONFIG_PPC_DENORMALISATION
        . = 0x5500
        b       denorm_exception_hv
#endif
        STD_RELON_EXCEPTION_PSERIES(0x5700, 0x1700, altivec_assist)

        /* Other future vectors */
        .align  7
        .globl  __end_interrupts
__end_interrupts:
1043
        .align  7
/* Entry used by the direct-branch (relocation-on) syscall path. */
system_call_entry_direct:
#if defined(CONFIG_RELOCATABLE)
        /* The first level prologue may have used LR to get here, saving
         * orig in r10.  To save hacking/ifdeffing common code, restore here.
         */
        mtlr    r10
#endif
system_call_entry:
        b       system_call_common

/* Reachable via LOAD_HANDLER; just tail-branch to the real routine. */
ppc64_runlatch_on_trampoline:
        b       __ppc64_runlatch_on
1057
1058/*
1059 * Here we have detected that the kernel stack pointer is bad.
1060 * R9 contains the saved CR, r13 points to the paca,
1061 * r10 contains the (bad) kernel stack pointer,
1062 * r11 and r12 contain the saved SRR0 and SRR1.
1063 * We switch to using an emergency stack, save the registers there,
1064 * and call kernel_bad_stack(), which panics.
1065 */
1066bad_stack:
1067        ld      r1,PACAEMERGSP(r13)
1068        subi    r1,r1,64+INT_FRAME_SIZE
1069        std     r9,_CCR(r1)
1070        std     r10,GPR1(r1)
1071        std     r11,_NIP(r1)
1072        std     r12,_MSR(r1)
1073        mfspr   r11,SPRN_DAR
1074        mfspr   r12,SPRN_DSISR
1075        std     r11,_DAR(r1)
1076        std     r12,_DSISR(r1)
1077        mflr    r10
1078        mfctr   r11
1079        mfxer   r12
1080        std     r10,_LINK(r1)
1081        std     r11,_CTR(r1)
1082        std     r12,_XER(r1)
1083        SAVE_GPR(0,r1)
1084        SAVE_GPR(2,r1)
1085        ld      r10,EX_R3(r3)
1086        std     r10,GPR3(r1)
1087        SAVE_GPR(4,r1)
1088        SAVE_4GPRS(5,r1)
1089        ld      r9,EX_R9(r3)
1090        ld      r10,EX_R10(r3)
1091        SAVE_2GPRS(9,r1)
1092        ld      r9,EX_R11(r3)
1093        ld      r10,EX_R12(r3)
1094        ld      r11,EX_R13(r3)
1095        std     r9,GPR11(r1)
1096        std     r10,GPR12(r1)
1097        std     r11,GPR13(r1)
1098BEGIN_FTR_SECTION
1099        ld      r10,EX_CFAR(r3)
1100        std     r10,ORIG_GPR3(r1)
1101END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
1102        SAVE_8GPRS(14,r1)
1103        SAVE_10GPRS(22,r1)
1104        lhz     r12,PACA_TRAP_SAVE(r13)
1105        std     r12,_TRAP(r1)
1106        addi    r11,r1,INT_FRAME_SIZE
1107        std     r11,0(r1)
1108        li      r12,0
1109        std     r12,0(r11)
1110        ld      r2,PACATOC(r13)
1111        ld      r11,exception_marker@toc(r2)
1112        std     r12,RESULT(r1)
1113        std     r11,STACK_FRAME_OVERHEAD-16(r1)
11141:      addi    r3,r1,STACK_FRAME_OVERHEAD
1115        bl      kernel_bad_stack
1116        b       1b
1117
1118/*
1119 * Here r13 points to the paca, r9 contains the saved CR,
1120 * SRR0 and SRR1 are saved in r11 and r12,
1121 * r9 - r13 are saved in paca->exgen.
1122 */
1123        .align  7
1124        .globl data_access_common
1125data_access_common:
1126        mfspr   r10,SPRN_DAR
1127        std     r10,PACA_EXGEN+EX_DAR(r13)
1128        mfspr   r10,SPRN_DSISR
1129        stw     r10,PACA_EXGEN+EX_DSISR(r13)
1130        EXCEPTION_PROLOG_COMMON(0x300, PACA_EXGEN)
1131        DISABLE_INTS
1132        ld      r12,_MSR(r1)
1133        ld      r3,PACA_EXGEN+EX_DAR(r13)
1134        lwz     r4,PACA_EXGEN+EX_DSISR(r13)
1135        li      r5,0x300
1136        b       do_hash_page            /* Try to handle as hpte fault */
1137
        .align  7
        .globl  h_data_storage_common
h_data_storage_common:
        /* Hypervisor data storage interrupt: record HDAR/HDSISR and
         * report it as an unknown exception (no real handler here).
         */
        mfspr   r10,SPRN_HDAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_HDSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0xe00, PACA_EXGEN)
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      unknown_exception
        b       ret_from_except
1151
        .align  7
        .globl instruction_access_common
instruction_access_common:
        EXCEPTION_PROLOG_COMMON(0x400, PACA_EXGEN)
        DISABLE_INTS
        ld      r12,_MSR(r1)
        ld      r3,_NIP(r1)             /* faulting address is the old NIP */
        andis.  r4,r12,0x5820           /* fault reason bits from SRR1 */
        li      r5,0x400                /* trap number */
        b       do_hash_page            /* Try to handle as hpte fault */

        STD_EXCEPTION_COMMON(0xe20, h_instr_storage, unknown_exception)
1164
1165/*
1166 * Here is the common SLB miss user that is used when going to virtual
1167 * mode for SLB misses, that is currently not used
1168 */
1169#ifdef __DISABLED__
1170        .align  7
1171        .globl  slb_miss_user_common
1172slb_miss_user_common:
1173        mflr    r10
1174        std     r3,PACA_EXGEN+EX_DAR(r13)
1175        stw     r9,PACA_EXGEN+EX_CCR(r13)
1176        std     r10,PACA_EXGEN+EX_LR(r13)
1177        std     r11,PACA_EXGEN+EX_SRR0(r13)
1178        bl      slb_allocate_user
1179
1180        ld      r10,PACA_EXGEN+EX_LR(r13)
1181        ld      r3,PACA_EXGEN+EX_R3(r13)
1182        lwz     r9,PACA_EXGEN+EX_CCR(r13)
1183        ld      r11,PACA_EXGEN+EX_SRR0(r13)
1184        mtlr    r10
1185        beq-    slb_miss_fault
1186
1187        andi.   r10,r12,MSR_RI          /* check for unrecoverable exception */
1188        beq-    unrecov_user_slb
1189        mfmsr   r10
1190
1191.machine push
1192.machine "power4"
1193        mtcrf   0x80,r9
1194.machine pop
1195
1196        clrrdi  r10,r10,2               /* clear RI before setting SRR0/1 */
1197        mtmsrd  r10,1
1198
1199        mtspr   SRR0,r11
1200        mtspr   SRR1,r12
1201
1202        ld      r9,PACA_EXGEN+EX_R9(r13)
1203        ld      r10,PACA_EXGEN+EX_R10(r13)
1204        ld      r11,PACA_EXGEN+EX_R11(r13)
1205        ld      r12,PACA_EXGEN+EX_R12(r13)
1206        ld      r13,PACA_EXGEN+EX_R13(r13)
1207        rfid
1208        b       .
1209
1210slb_miss_fault:
1211        EXCEPTION_PROLOG_COMMON(0x380, PACA_EXGEN)
1212        ld      r4,PACA_EXGEN+EX_DAR(r13)
1213        li      r5,0
1214        std     r4,_DAR(r1)
1215        std     r5,_DSISR(r1)
1216        b       handle_page_fault
1217
1218unrecov_user_slb:
1219        EXCEPTION_PROLOG_COMMON(0x4200, PACA_EXGEN)
1220        DISABLE_INTS
1221        bl      save_nvgprs
12221:      addi    r3,r1,STACK_FRAME_OVERHEAD
1223        bl      unrecoverable_exception
1224        b       1b
1225
1226#endif /* __DISABLED__ */
1227
1228
1229        /*
1230         * Machine check is different because we use a different
1231         * save area: PACA_EXMC instead of PACA_EXGEN.
1232         */
1233        .align  7
1234        .globl machine_check_common
1235machine_check_common:
1236
1237        mfspr   r10,SPRN_DAR
1238        std     r10,PACA_EXGEN+EX_DAR(r13)
1239        mfspr   r10,SPRN_DSISR
1240        stw     r10,PACA_EXGEN+EX_DSISR(r13)
1241        EXCEPTION_PROLOG_COMMON(0x200, PACA_EXMC)
1242        FINISH_NAP
1243        DISABLE_INTS
1244        ld      r3,PACA_EXGEN+EX_DAR(r13)
1245        lwz     r4,PACA_EXGEN+EX_DSISR(r13)
1246        std     r3,_DAR(r1)
1247        std     r4,_DSISR(r1)
1248        bl      save_nvgprs
1249        addi    r3,r1,STACK_FRAME_OVERHEAD
1250        bl      machine_check_exception
1251        b       ret_from_except
1252
        .align  7
        .globl alignment_common
alignment_common:
        /* Alignment interrupt: capture DAR/DSISR, build a frame, and
         * hand off to the C-level alignment fixup/reporting code.
         */
        mfspr   r10,SPRN_DAR
        std     r10,PACA_EXGEN+EX_DAR(r13)
        mfspr   r10,SPRN_DSISR
        stw     r10,PACA_EXGEN+EX_DSISR(r13)
        EXCEPTION_PROLOG_COMMON(0x600, PACA_EXGEN)
        ld      r3,PACA_EXGEN+EX_DAR(r13)
        lwz     r4,PACA_EXGEN+EX_DSISR(r13)
        std     r3,_DAR(r1)
        std     r4,_DSISR(r1)
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      alignment_exception
        b       ret_from_except
1270
        .align  7
        .globl program_check_common
program_check_common:
        /* Program interrupt (trap/illegal/privileged op): straight to C. */
        EXCEPTION_PROLOG_COMMON(0x700, PACA_EXGEN)
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      program_check_exception
        b       ret_from_except
1280
        .align  7
        .globl fp_unavailable_common
fp_unavailable_common:
        /* FP unavailable: from user, lazily load the FP state; from the
         * kernel it is a bug.  cr tested below was set by the prolog.
         */
        EXCEPTION_PROLOG_COMMON(0x800, PACA_EXGEN)
        bne     1f                      /* if from user, just load it up */
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      kernel_fp_unavailable_exception
        BUG_OPCODE
1:
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
        /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
         * transaction), go do TM stuff
         */
        rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
        bne-    2f
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif
        bl      load_up_fpu
        b       fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      fp_unavailable_tm
        b       ret_from_except
#endif
        .align  7
        .globl altivec_unavailable_common
altivec_unavailable_common:
        /* AltiVec unavailable: lazily load VMX state for user tasks on
         * AltiVec-capable CPUs; otherwise raise the facility exception.
         */
        EXCEPTION_PROLOG_COMMON(0xf20, PACA_EXGEN)
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
        beq     1f                      /* from kernel: no lazy load */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
        /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
         * transaction), go do TM stuff
         */
        rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
        bne-    2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
        bl      load_up_altivec
        b       fast_exception_return
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      altivec_unavailable_tm
        b       ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      altivec_unavailable_exception
        b       ret_from_except
1345
        .align  7
        .globl vsx_unavailable_common
vsx_unavailable_common:
        /* VSX unavailable: mirrors the AltiVec path above, but
         * load_up_vsx is tail-called (it returns to the interrupted
         * context itself).
         */
        EXCEPTION_PROLOG_COMMON(0xf40, PACA_EXGEN)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
        beq     1f                      /* from kernel: no lazy load */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
  BEGIN_FTR_SECTION_NESTED(69)
        /* Test if 2 TM state bits are zero.  If non-zero (ie. userspace was in
         * transaction), go do TM stuff
         */
        rldicl. r0, r12, (64-MSR_TS_LG), (64-2)
        bne-    2f
  END_FTR_SECTION_NESTED(CPU_FTR_TM, CPU_FTR_TM, 69)
#endif
        b       load_up_vsx
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2:      /* User process was in a transaction */
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      vsx_unavailable_tm
        b       ret_from_except
#endif
1:
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
        bl      save_nvgprs
        DISABLE_INTS
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      vsx_unavailable_exception
        b       ret_from_except
1379
        STD_EXCEPTION_COMMON(0xf60, facility_unavailable, facility_unavailable_exception)
        STD_EXCEPTION_COMMON(0xf80, hv_facility_unavailable, facility_unavailable_exception)

        .align  7
        .globl  __end_handlers
__end_handlers:

        /* Equivalents to the above handlers for relocation-on interrupt vectors */
        STD_RELON_EXCEPTION_HV_OOL(0xe40, emulation_assist)
        MASKABLE_RELON_EXCEPTION_HV_OOL(0xe80, h_doorbell)

        STD_RELON_EXCEPTION_PSERIES_OOL(0xf00, performance_monitor)
        STD_RELON_EXCEPTION_PSERIES_OOL(0xf20, altivec_unavailable)
        STD_RELON_EXCEPTION_PSERIES_OOL(0xf40, vsx_unavailable)
        STD_RELON_EXCEPTION_PSERIES_OOL(0xf60, facility_unavailable)
        STD_RELON_EXCEPTION_HV_OOL(0xf80, hv_facility_unavailable)
1396
#if defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV)
/*
 * Data area reserved for FWNMI option.
 * This address (0x7000) is fixed by the RPA.
 */
        . = 0x7000
        .globl fwnmi_data_area
fwnmi_data_area:

        /* pseries and powernv need to keep the whole page from
         * 0x7000 to 0x8000 free for use by the firmware
         */
        . = 0x8000
#endif /* defined(CONFIG_PPC_PSERIES) || defined(CONFIG_PPC_POWERNV) */

/* Space for CPU0's segment table */
        .balign 4096
        .globl initial_stab
initial_stab:
        .space  4096
1417
#ifdef CONFIG_PPC_POWERNV
/* OPAL hands secondary-thread machine checks here with the event in r3;
 * record the event in the PACA, restore SRR0/SRR1/r3 from it, and fall
 * into the normal machine check path.
 */
_GLOBAL(opal_mc_secondary_handler)
        HMT_MEDIUM_PPR_DISCARD
        SET_SCRATCH0(r13)
        GET_PACA(r13)
        clrldi  r3,r3,2                 /* strip the top address bits */
        tovirt(r3,r3)                   /* event pointer -> kernel virtual */
        std     r3,PACA_OPAL_MC_EVT(r13)
        ld      r13,OPAL_MC_SRR0(r3)
        mtspr   SPRN_SRR0,r13
        ld      r13,OPAL_MC_SRR1(r3)
        mtspr   SPRN_SRR1,r13
        ld      r3,OPAL_MC_GPR3(r3)
        GET_SCRATCH0(r13)
        b       machine_check_pSeries
#endif /* CONFIG_PPC_POWERNV */
1434
1435
/*
 * Unwind a machine-check stack frame: clear MSR_RI, reload SRR0/SRR1
 * from the saved NIP/MSR, restore CTR/XER/LR/CR and all GPRs, drop
 * paca->in_mce by one, and put the original r1 back.  The caller then
 * issues the rfid (or branches onward) itself.
 */
#define MACHINE_CHECK_HANDLER_WINDUP                    \
        /* Clear MSR_RI before setting SRR0 and SRR1. */\
        li      r0,MSR_RI;                              \
        mfmsr   r9;             /* get MSR value */     \
        andc    r9,r9,r0;                               \
        mtmsrd  r9,1;           /* Clear MSR_RI */      \
        /* Move original SRR0 and SRR1 into the respective regs */      \
        ld      r9,_MSR(r1);                            \
        mtspr   SPRN_SRR1,r9;                           \
        ld      r3,_NIP(r1);                            \
        mtspr   SPRN_SRR0,r3;                           \
        ld      r9,_CTR(r1);                            \
        mtctr   r9;                                     \
        ld      r9,_XER(r1);                            \
        mtxer   r9;                                     \
        ld      r9,_LINK(r1);                           \
        mtlr    r9;                                     \
        REST_GPR(0, r1);                                \
        REST_8GPRS(2, r1);                              \
        REST_GPR(10, r1);                               \
        ld      r11,_CCR(r1);                           \
        mtcr    r11;                                    \
        /* Decrement paca->in_mce. */                   \
        lhz     r12,PACA_IN_MCE(r13);                   \
        subi    r12,r12,1;                              \
        sth     r12,PACA_IN_MCE(r13);                   \
        REST_GPR(11, r1);                               \
        REST_2GPRS(12, r1);                             \
        /* restore original r1. */                      \
        ld      r1,GPR1(r1)
1466
1467        /*
1468         * Handle machine check early in real mode. We come here with
1469         * ME=1, MMU (IR=0 and DR=0) off and using MC emergency stack.
1470         */
1471        .align  7
1472        .globl machine_check_handle_early
1473machine_check_handle_early:
1474        std     r0,GPR0(r1)     /* Save r0 */
1475        EXCEPTION_PROLOG_COMMON_3(0x200)
1476        bl      save_nvgprs
1477        addi    r3,r1,STACK_FRAME_OVERHEAD
1478        bl      machine_check_early
1479        std     r3,RESULT(r1)   /* Save result */
1480        ld      r12,_MSR(r1)
1481#ifdef  CONFIG_PPC_P7_NAP
1482        /*
1483         * Check if thread was in power saving mode. We come here when any
1484         * of the following is true:
1485         * a. thread wasn't in power saving mode
1486         * b. thread was in power saving mode with no state loss or
1487         *    supervisor state loss
1488         *
1489         * Go back to nap again if (b) is true.
1490         */
1491        rlwinm. r11,r12,47-31,30,31     /* Was it in power saving mode? */
1492        beq     4f                      /* No, it wasn;t */
1493        /* Thread was in power saving mode. Go back to nap again. */
1494        cmpwi   r11,2
1495        bne     3f
1496        /* Supervisor state loss */
1497        li      r0,1
1498        stb     r0,PACA_NAPSTATELOST(r13)
14993:      bl      machine_check_queue_event
1500        MACHINE_CHECK_HANDLER_WINDUP
1501        GET_PACA(r13)
1502        ld      r1,PACAR1(r13)
1503        li      r3,PNV_THREAD_NAP
1504        b       power7_enter_nap_mode
15054:
1506#endif
1507        /*
1508         * Check if we are coming from hypervisor userspace. If yes then we
1509         * continue in host kernel in V mode to deliver the MC event.
1510         */
1511        rldicl. r11,r12,4,63            /* See if MC hit while in HV mode. */
1512        beq     5f
1513        andi.   r11,r12,MSR_PR          /* See if coming from user. */
1514        bne     9f                      /* continue in V mode if we are. */
1515
15165:
1517#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
1518        /*
1519         * We are coming from kernel context. Check if we are coming from
1520         * guest. if yes, then we can continue. We will fall through
1521         * do_kvm_200->kvmppc_interrupt to deliver the MC event to guest.
1522         */
1523        lbz     r11,HSTATE_IN_GUEST(r13)
1524        cmpwi   r11,0                   /* Check if coming from guest */
1525        bne     9f                      /* continue if we are. */
1526#endif
1527        /*
1528         * At this point we are not sure about what context we come from.
1529         * Queue up the MCE event and return from the interrupt.
1530         * But before that, check if this is an un-recoverable exception.
1531         * If yes, then stay on emergency stack and panic.
1532         */
1533        andi.   r11,r12,MSR_RI
1534        bne     2f
15351:      mfspr   r11,SPRN_SRR0
1536        ld      r10,PACAKBASE(r13)
1537        LOAD_HANDLER(r10,unrecover_mce)
1538        mtspr   SPRN_SRR0,r10
1539        ld      r10,PACAKMSR(r13)
1540        /*
1541         * We are going down. But there are chances that we might get hit by
1542         * another MCE during panic path and we may run into unstable state
1543         * with no way out. Hence, turn ME bit off while going down, so that
1544         * when another MCE is hit during panic path, system will checkstop
1545         * and hypervisor will get restarted cleanly by SP.
1546         */
1547        li      r3,MSR_ME
1548        andc    r10,r10,r3              /* Turn off MSR_ME */
1549        mtspr   SPRN_SRR1,r10
1550        rfid
1551        b       .
15522:
1553        /*
1554         * Check if we have successfully handled/recovered from error, if not
1555         * then stay on emergency stack and panic.
1556         */
1557        ld      r3,RESULT(r1)   /* Load result */
1558        cmpdi   r3,0            /* see if we handled MCE successfully */
1559
1560        beq     1b              /* if !handled then panic */
1561        /*
1562         * Return from MC interrupt.
1563         * Queue up the MCE event so that we can log it later, while
1564         * returning from kernel or opal call.
1565         */
1566        bl      machine_check_queue_event
1567        MACHINE_CHECK_HANDLER_WINDUP
1568        rfid
15699:
1570        /* Deliver the machine check to host kernel in V mode. */
1571        MACHINE_CHECK_HANDLER_WINDUP
1572        b       machine_check_pSeries
1573
/* Terminal path for an unrecoverable machine check: report, then spin. */
unrecover_mce:
        /* Invoke machine_check_exception to print MCE event and panic. */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      machine_check_exception
        /*
         * We will not reach here. Even if we did, there is no way out. Call
         * unrecoverable_exception and die.
         */
1:      addi    r3,r1,STACK_FRAME_OVERHEAD
        bl      unrecoverable_exception
        b       1b
1585/*
1586 * r13 points to the PACA, r9 contains the saved CR,
1587 * r12 contain the saved SRR1, SRR0 is still ready for return
1588 * r3 has the faulting address
1589 * r9 - r13 are saved in paca->exslb.
1590 * r3 is saved in paca->slb_r3
1591 * We assume we aren't going to take any exceptions during this procedure.
1592 */
1593slb_miss_realmode:
1594        mflr    r10
1595#ifdef CONFIG_RELOCATABLE
1596        mtctr   r11
1597#endif
1598
1599        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
1600        std     r10,PACA_EXSLB+EX_LR(r13)       /* save LR */
1601
1602        bl      slb_allocate_realmode
1603
1604        /* All done -- return from exception. */
1605
1606        ld      r10,PACA_EXSLB+EX_LR(r13)
1607        ld      r3,PACA_EXSLB+EX_R3(r13)
1608        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
1609
1610        mtlr    r10
1611
1612        andi.   r10,r12,MSR_RI  /* check for unrecoverable exception */
1613        beq-    2f
1614
1615.machine        push
1616.machine        "power4"
1617        mtcrf   0x80,r9
1618        mtcrf   0x01,r9         /* slb_allocate uses cr0 and cr7 */
1619.machine        pop
1620
1621        RESTORE_PPR_PACA(PACA_EXSLB, r9)
1622        ld      r9,PACA_EXSLB+EX_R9(r13)
1623        ld      r10,PACA_EXSLB+EX_R10(r13)
1624        ld      r11,PACA_EXSLB+EX_R11(r13)
1625        ld      r12,PACA_EXSLB+EX_R12(r13)
1626        ld      r13,PACA_EXSLB+EX_R13(r13)
1627        rfid
1628        b       .       /* prevent speculative execution */
1629
16302:      mfspr   r11,SPRN_SRR0
1631        ld      r10,PACAKBASE(r13)
1632        LOAD_HANDLER(r10,unrecov_slb)
1633        mtspr   SPRN_SRR0,r10
1634        ld      r10,PACAKMSR(r13)
1635        mtspr   SPRN_SRR1,r10
1636        rfid
1637        b       .
1638
/*
 * Unrecoverable SLB/STAB fault (reached from slb_miss_realmode and
 * do_stab_bolted when MSR[RI] was clear): build a full exception frame
 * and die via unrecoverable_exception().
 */
unrecov_slb:
        EXCEPTION_PROLOG_COMMON(0x4100, PACA_EXSLB)
        DISABLE_INTS
        bl      save_nvgprs                     /* complete the pt_regs frame */
1:      addi    r3,r1,STACK_FRAME_OVERHEAD      /* r3 = struct pt_regs * */
        bl      unrecoverable_exception
        b       1b                              /* spin forever as a backstop */
1646
1647
#ifdef CONFIG_PPC_970_NAP
/*
 * Fix up state after a PPC970 wakes from nap: clear the flag bits in
 * r10 from the thread_info local flags (thread_info in r11, current
 * flags in r9), then replace the saved NIP with the saved LR so the
 * interrupted idle task resumes as if it had executed a blr.
 */
power4_fixup_nap:
        andc    r9,r9,r10               /* clear the r10 flag bits */
        std     r9,TI_LOCAL_FLAGS(r11)
        ld      r10,_LINK(r1)           /* make idle task do the */
        std     r10,_NIP(r1)            /* equivalent of a blr */
        blr
#endif
1656
1657/*
1658 * Hash table stuff
1659 */
1660        .align  7
1661do_hash_page:
1662        std     r3,_DAR(r1)
1663        std     r4,_DSISR(r1)
1664
1665        andis.  r0,r4,0xa410            /* weird error? */
1666        bne-    handle_page_fault       /* if not, try to insert a HPTE */
1667        andis.  r0,r4,DSISR_DABRMATCH@h
1668        bne-    handle_dabr_fault
1669
1670BEGIN_FTR_SECTION
1671        andis.  r0,r4,0x0020            /* Is it a segment table fault? */
1672        bne-    do_ste_alloc            /* If so handle it */
1673END_MMU_FTR_SECTION_IFCLR(MMU_FTR_SLB)
1674
1675        CURRENT_THREAD_INFO(r11, r1)
1676        lwz     r0,TI_PREEMPT(r11)      /* If we're in an "NMI" */
1677        andis.  r0,r0,NMI_MASK@h        /* (i.e. an irq when soft-disabled) */
1678        bne     77f                     /* then don't call hash_page now */
1679        /*
1680         * We need to set the _PAGE_USER bit if MSR_PR is set or if we are
1681         * accessing a userspace segment (even from the kernel). We assume
1682         * kernel addresses always have the high bit set.
1683         */
1684        rlwinm  r4,r4,32-25+9,31-9,31-9 /* DSISR_STORE -> _PAGE_RW */
1685        rotldi  r0,r3,15                /* Move high bit into MSR_PR posn */
1686        orc     r0,r12,r0               /* MSR_PR | ~high_bit */
1687        rlwimi  r4,r0,32-13,30,30       /* becomes _PAGE_USER access bit */
1688        ori     r4,r4,1                 /* add _PAGE_PRESENT */
1689        rlwimi  r4,r5,22+2,31-2,31-2    /* Set _PAGE_EXEC if trap is 0x400 */
1690
1691        /*
1692         * r3 contains the faulting address
1693         * r4 contains the required access permissions
1694         * r5 contains the trap number
1695         * r6 contains dsisr
1696         *
1697         * at return r3 = 0 for success, 1 for page fault, negative for error
1698         */
1699        ld      r6,_DSISR(r1)
1700        bl      hash_page               /* build HPTE if possible */
1701        cmpdi   r3,0                    /* see if hash_page succeeded */
1702
1703        /* Success */
1704        beq     fast_exc_return_irq     /* Return from exception on success */
1705
1706        /* Error */
1707        blt-    13f
1708
/* Here we have a page fault that hash_page can't handle. */
handle_page_fault:
11:     ld      r4,_DAR(r1)             /* r4 = faulting address */
        ld      r5,_DSISR(r1)           /* r5 = fault status */
        addi    r3,r1,STACK_FRAME_OVERHEAD      /* r3 = struct pt_regs * */
        bl      do_page_fault
        cmpdi   r3,0                    /* 0 => fault was handled */
        beq+    12f                     /* shared exit at handle_dabr_fault */
        bl      save_nvgprs             /* full frame before reporting */
        mr      r5,r3                   /* r5 = do_page_fault return code */
        addi    r3,r1,STACK_FRAME_OVERHEAD
        lwz     r4,_DAR(r1)
        bl      bad_page_fault
        b       ret_from_except
1723
/* We have a data breakpoint exception - handle it */
handle_dabr_fault:
        bl      save_nvgprs             /* save non-volatile GPRs for do_break */
        ld      r4,_DAR(r1)             /* r4 = breakpoint address */
        ld      r5,_DSISR(r1)           /* r5 = fault status */
        addi    r3,r1,STACK_FRAME_OVERHEAD      /* r3 = struct pt_regs * */
        bl      do_break
12:     b       ret_from_except_lite    /* exit also used by handle_page_fault */
1732
1733
/* We have a page fault that hash_page could handle but HV refused
 * the PTE insertion
 */
13:     bl      save_nvgprs             /* full frame before reporting */
        mr      r5,r3                   /* r5 = error code from hash_page */
        addi    r3,r1,STACK_FRAME_OVERHEAD      /* r3 = struct pt_regs * */
        ld      r4,_DAR(r1)             /* r4 = faulting address */
        bl      low_hash_fault
        b       ret_from_except
1743
1744/*
1745 * We come here as a result of a DSI at a point where we don't want
1746 * to call hash_page, such as when we are accessing memory (possibly
1747 * user memory) inside a PMU interrupt that occurred while interrupts
1748 * were soft-disabled.  We want to invoke the exception handler for
1749 * the access, or panic if there isn't a handler.
1750 */
175177:     bl      save_nvgprs
1752        mr      r4,r3
1753        addi    r3,r1,STACK_FRAME_OVERHEAD
1754        li      r5,SIGSEGV
1755        bl      bad_page_fault
1756        b       ret_from_except
1757
        /* here we have a segment miss */
do_ste_alloc:
        bl      ste_allocate            /* try to insert stab entry */
        cmpdi   r3,0                    /* 0 => entry inserted */
        bne-    handle_page_fault       /* otherwise take the page fault path */
        b       fast_exception_return
1764
1765/*
1766 * r13 points to the PACA, r9 contains the saved CR,
1767 * r11 and r12 contain the saved SRR0 and SRR1.
1768 * r9 - r13 are saved in paca->exslb.
1769 * We assume we aren't going to take any exceptions during this procedure.
1770 * We assume (DAR >> 60) == 0xc.
1771 */
1772        .align  7
1773do_stab_bolted:
1774        stw     r9,PACA_EXSLB+EX_CCR(r13)       /* save CR in exc. frame */
1775        std     r11,PACA_EXSLB+EX_SRR0(r13)     /* save SRR0 in exc. frame */
1776        mfspr   r11,SPRN_DAR                    /* ea */
1777
1778        /*
1779         * check for bad kernel/user address
1780         * (ea & ~REGION_MASK) >= PGTABLE_RANGE
1781         */
1782        rldicr. r9,r11,4,(63 - 46 - 4)
1783        li      r9,0    /* VSID = 0 for bad address */
1784        bne-    0f
1785
1786        /*
1787         * Calculate VSID:
1788         * This is the kernel vsid, we take the top for context from
1789         * the range. context = (MAX_USER_CONTEXT) + ((ea >> 60) - 0xc) + 1
1790         * Here we know that (ea >> 60) == 0xc
1791         */
1792        lis     r9,(MAX_USER_CONTEXT + 1)@ha
1793        addi    r9,r9,(MAX_USER_CONTEXT + 1)@l
1794
1795        srdi    r10,r11,SID_SHIFT
1796        rldimi  r10,r9,ESID_BITS,0 /* proto vsid */
1797        ASM_VSID_SCRAMBLE(r10, r9, 256M)
1798        rldic   r9,r10,12,16    /* r9 = vsid << 12 */
1799
18000:
1801        /* Hash to the primary group */
1802        ld      r10,PACASTABVIRT(r13)
1803        srdi    r11,r11,SID_SHIFT
1804        rldimi  r10,r11,7,52    /* r10 = first ste of the group */
1805
1806        /* Search the primary group for a free entry */
18071:      ld      r11,0(r10)      /* Test valid bit of the current ste    */
1808        andi.   r11,r11,0x80
1809        beq     2f
1810        addi    r10,r10,16
1811        andi.   r11,r10,0x70
1812        bne     1b
1813
1814        /* Stick for only searching the primary group for now.          */
1815        /* At least for now, we use a very simple random castout scheme */
1816        /* Use the TB as a random number ;  OR in 1 to avoid entry 0    */
1817        mftb    r11
1818        rldic   r11,r11,4,57    /* r11 = (r11 << 4) & 0x70 */
1819        ori     r11,r11,0x10
1820
1821        /* r10 currently points to an ste one past the group of interest */
1822        /* make it point to the randomly selected entry                 */
1823        subi    r10,r10,128
1824        or      r10,r10,r11     /* r10 is the entry to invalidate       */
1825
1826        isync                   /* mark the entry invalid               */
1827        ld      r11,0(r10)
1828        rldicl  r11,r11,56,1    /* clear the valid bit */
1829        rotldi  r11,r11,8
1830        std     r11,0(r10)
1831        sync
1832
1833        clrrdi  r11,r11,28      /* Get the esid part of the ste         */
1834        slbie   r11
1835
18362:      std     r9,8(r10)       /* Store the vsid part of the ste       */
1837        eieio
1838
1839        mfspr   r11,SPRN_DAR            /* Get the new esid                     */
1840        clrrdi  r11,r11,28      /* Permits a full 32b of ESID           */
1841        ori     r11,r11,0x90    /* Turn on valid and kp                 */
1842        std     r11,0(r10)      /* Put new entry back into the stab     */
1843
1844        sync
1845
1846        /* All done -- return from exception. */
1847        lwz     r9,PACA_EXSLB+EX_CCR(r13)       /* get saved CR */
1848        ld      r11,PACA_EXSLB+EX_SRR0(r13)     /* get saved SRR0 */
1849
1850        andi.   r10,r12,MSR_RI
1851        beq-    unrecov_slb
1852
1853        mtcrf   0x80,r9                 /* restore CR */
1854
1855        mfmsr   r10
1856        clrrdi  r10,r10,2
1857        mtmsrd  r10,1
1858
1859        mtspr   SPRN_SRR0,r11
1860        mtspr   SPRN_SRR1,r12
1861        ld      r9,PACA_EXSLB+EX_R9(r13)
1862        ld      r10,PACA_EXSLB+EX_R10(r13)
1863        ld      r11,PACA_EXSLB+EX_R11(r13)
1864        ld      r12,PACA_EXSLB+EX_R12(r13)
1865        ld      r13,PACA_EXSLB+EX_R13(r13)
1866        rfid
1867        b       .       /* prevent speculative execution */
1868