linux/arch/powerpc/kvm/bookehv_interrupts.S
   1/* SPDX-License-Identifier: GPL-2.0-only */
   2/*
   3 *
   4 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
   5 *
   6 * Author: Varun Sethi <varun.sethi@freescale.com>
   7 * Author: Scott Wood <scottwood@freescale.com>
   8 * Author: Mihai Caraman <mihai.caraman@freescale.com>
   9 *
  10 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
  11 */
  12
  13#include <asm/ppc_asm.h>
  14#include <asm/kvm_asm.h>
  15#include <asm/reg.h>
  16#include <asm/page.h>
  17#include <asm/asm-compat.h>
  18#include <asm/asm-offsets.h>
  19#include <asm/bitsperlong.h>
  20
  21#ifdef CONFIG_64BIT
  22#include <asm/exception-64e.h>
  23#include <asm/hw_irq.h>
  24#include <asm/irqflags.h>
  25#else
  26#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
  27#endif
  28
  29#define LONGBYTES               (BITS_PER_LONG / 8)
  30
  31#define VCPU_GUEST_SPRG(n)      (VCPU_GUEST_SPRGS + (n * LONGBYTES))
  32
  33/* The host stack layout: */
  34#define HOST_R1         0 /* Implied by stwu. */
  35#define HOST_CALLEE_LR  PPC_LR_STKOFF
  36#define HOST_RUN        (HOST_CALLEE_LR + LONGBYTES)
  37/*
   38 * r2 is special: it holds 'current', and it is made nonvolatile in the
  39 * kernel with the -ffixed-r2 gcc option.
  40 */
  41#define HOST_R2         (HOST_RUN + LONGBYTES)
  42#define HOST_CR         (HOST_R2 + LONGBYTES)
  43#define HOST_NV_GPRS    (HOST_CR + LONGBYTES)
  44#define __HOST_NV_GPR(n)  (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
  45#define HOST_NV_GPR(n)  __HOST_NV_GPR(__REG_##n)
  46#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES)
  47#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
   48/* LR in the caller's stack frame. */
  49#define HOST_STACK_LR   (HOST_STACK_SIZE + PPC_LR_STKOFF)
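/*
 * A rough sketch of the frame these defines describe (offsets from r1,
 * low to high; for orientation only, the defines above are authoritative):
 *
 *   HOST_R1 (0)            back chain, written by the stwu in __kvmppc_vcpu_run
 *   HOST_CALLEE_LR         LR save slot for our callees (PPC_LR_STKOFF)
 *   HOST_RUN               reserved slot (not written in this file)
 *   HOST_R2                saved host r2 ('current')
 *   HOST_CR                saved host CR
 *   HOST_NV_GPR(R14..R31)  saved host non-volatile GPRs
 *   ---- HOST_STACK_SIZE: HOST_MIN_STACK_SIZE rounded up to 16 bytes ----
 *   HOST_STACK_LR          our LR, saved in the caller's frame
 */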
  50
  51#define NEED_EMU                0x00000001 /* emulation -- save nv regs */
  52#define NEED_DEAR               0x00000002 /* save faulting DEAR */
  53#define NEED_ESR                0x00000004 /* save faulting ESR */
  54
  55/*
  56 * On entry:
  57 * r4 = vcpu, r5 = srr0, r6 = srr1
  58 * saved in vcpu: cr, ctr, r3-r13
  59 */
  60.macro kvm_handler_common intno, srr0, flags
  61        /* Restore host stack pointer */
  62        PPC_STL r1, VCPU_GPR(R1)(r4)
  63        PPC_STL r2, VCPU_GPR(R2)(r4)
  64        PPC_LL  r1, VCPU_HOST_STACK(r4)
  65        PPC_LL  r2, HOST_R2(r1)
  66
  67START_BTB_FLUSH_SECTION
  68        BTB_FLUSH(r10)
  69END_BTB_FLUSH_SECTION
  70
  71        mfspr   r10, SPRN_PID
  72        lwz     r8, VCPU_HOST_PID(r4)
  73        PPC_LL  r11, VCPU_SHARED(r4)
  74        PPC_STL r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
  75        li      r14, \intno
  76
  77        stw     r10, VCPU_GUEST_PID(r4)
  78        mtspr   SPRN_PID, r8
  79
  80#ifdef CONFIG_KVM_EXIT_TIMING
  81        /* save exit time */
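        /*
         * The 64-bit timebase is read as TBU/TBL/TBU; if the two TBU reads
         * differ, TBL wrapped in between and the read is retried.
         */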
  821:      mfspr   r7, SPRN_TBRU
  83        mfspr   r8, SPRN_TBRL
  84        mfspr   r9, SPRN_TBRU
  85        cmpw    r9, r7
  86        stw     r8, VCPU_TIMING_EXIT_TBL(r4)
  87        bne-    1b
  88        stw     r9, VCPU_TIMING_EXIT_TBU(r4)
  89#endif
  90
  91        oris    r8, r6, MSR_CE@h
  92        PPC_STD(r6, VCPU_SHARED_MSR, r11)
  93        ori     r8, r8, MSR_ME | MSR_RI
  94        PPC_STL r5, VCPU_PC(r4)
  95
  96        /*
  97         * Make sure CE/ME/RI are set (if appropriate for exception type)
   98         * whether or not the guest had them set.  Since mfmsr/mtmsr are
  99         * somewhat expensive, skip in the common case where the guest
 100         * had all these bits set (and thus they're still set if
 101         * appropriate for the exception type).
 102         */
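        /*
         * The .if guards below avoid re-enabling the level currently being
         * handled (no CE for critical/machine-check input, no ME/RI for
         * machine check), since that level's save/restore registers are
         * still live at this point.
         */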
 103        cmpw    r6, r8
 104        beq     1f
 105        mfmsr   r7
 106        .if     \srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
 107        oris    r7, r7, MSR_CE@h
 108        .endif
 109        .if     \srr0 != SPRN_MCSRR0
 110        ori     r7, r7, MSR_ME | MSR_RI
 111        .endif
 112        mtmsr   r7
 1131:
 114
 115        .if     \flags & NEED_EMU
 116        PPC_STL r15, VCPU_GPR(R15)(r4)
 117        PPC_STL r16, VCPU_GPR(R16)(r4)
 118        PPC_STL r17, VCPU_GPR(R17)(r4)
 119        PPC_STL r18, VCPU_GPR(R18)(r4)
 120        PPC_STL r19, VCPU_GPR(R19)(r4)
 121        PPC_STL r20, VCPU_GPR(R20)(r4)
 122        PPC_STL r21, VCPU_GPR(R21)(r4)
 123        PPC_STL r22, VCPU_GPR(R22)(r4)
 124        PPC_STL r23, VCPU_GPR(R23)(r4)
 125        PPC_STL r24, VCPU_GPR(R24)(r4)
 126        PPC_STL r25, VCPU_GPR(R25)(r4)
 127        PPC_STL r26, VCPU_GPR(R26)(r4)
 128        PPC_STL r27, VCPU_GPR(R27)(r4)
 129        PPC_STL r28, VCPU_GPR(R28)(r4)
 130        PPC_STL r29, VCPU_GPR(R29)(r4)
 131        PPC_STL r30, VCPU_GPR(R30)(r4)
 132        PPC_STL r31, VCPU_GPR(R31)(r4)
 133
 134        /*
  135         * We don't use external PID support. lwepx faults would need to be
  136         * handled by KVM, and that would require additional code in DO_KVM
  137         * (for DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS],
  138         * which is too intrusive for the host. The last instruction is
  139         * fetched later, in kvmppc_get_last_inst().
 140         */
 141        li      r9, KVM_INST_FETCH_FAILED
 142        stw     r9, VCPU_LAST_INST(r4)
 143        .endif
 144
 145        .if     \flags & NEED_ESR
 146        mfspr   r8, SPRN_ESR
 147        PPC_STL r8, VCPU_FAULT_ESR(r4)
 148        .endif
 149
 150        .if     \flags & NEED_DEAR
 151        mfspr   r9, SPRN_DEAR
 152        PPC_STL r9, VCPU_FAULT_DEAR(r4)
 153        .endif
 154
 155        b       kvmppc_resume_host
 156.endm
 157
 158#ifdef CONFIG_64BIT
 159/* Exception types */
 160#define EX_GEN                  1
 161#define EX_GDBELL               2
 162#define EX_DBG                  3
 163#define EX_MC                   4
 164#define EX_CRIT                 5
 165#define EX_TLB                  6
 166
 167/*
 168 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 169 */
 170.macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags
 171 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
 172        mr      r11, r4
 173        /*
 174         * Get vcpu from Paca: paca->__current.thread->kvm_vcpu
 175         */
 176        PPC_LL  r4, PACACURRENT(r13)
 177        PPC_LL  r4, (THREAD + THREAD_KVM_VCPU)(r4)
 178        PPC_STL r10, VCPU_CR(r4)
 179        PPC_STL r11, VCPU_GPR(R4)(r4)
 180        PPC_STL r5, VCPU_GPR(R5)(r4)
 181        PPC_STL r6, VCPU_GPR(R6)(r4)
 182        PPC_STL r8, VCPU_GPR(R8)(r4)
 183        PPC_STL r9, VCPU_GPR(R9)(r4)
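        /*
         * Recover the registers stashed by the host exception prologue:
         * bolted TLB miss handlers keep them in the PACA EX_TLB save area
         * (addressed by r12 here), the other types use the level's scratch
         * SPRG plus the PACA exception save area (see kvm_booke_hv_asm.h).
         */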
 184        .if \type == EX_TLB
 185        PPC_LL  r5, EX_TLB_R13(r12)
 186        PPC_LL  r6, EX_TLB_R10(r12)
 187        PPC_LL  r8, EX_TLB_R11(r12)
 188        mfspr   r12, \scratch
 189        .else
 190        mfspr   r5, \scratch
 191        PPC_LL  r6, (\paca_ex + \ex_r10)(r13)
 192        PPC_LL  r8, (\paca_ex + \ex_r11)(r13)
 193        .endif
 194        PPC_STL r5, VCPU_GPR(R13)(r4)
 195        PPC_STL r3, VCPU_GPR(R3)(r4)
 196        PPC_STL r7, VCPU_GPR(R7)(r4)
 197        PPC_STL r12, VCPU_GPR(R12)(r4)
 198        PPC_STL r6, VCPU_GPR(R10)(r4)
 199        PPC_STL r8, VCPU_GPR(R11)(r4)
 200        mfctr   r5
 201        PPC_STL r5, VCPU_CTR(r4)
 202        mfspr   r5, \srr0
 203        mfspr   r6, \srr1
 204        kvm_handler_common \intno, \srr0, \flags
 205.endm
 206
 207#define EX_PARAMS(type) \
 208        EX_##type, \
 209        SPRN_SPRG_##type##_SCRATCH, \
 210        PACA_EX##type, \
 211        EX_R10, \
 212        EX_R11
 213
 214#define EX_PARAMS_TLB \
 215        EX_TLB, \
 216        SPRN_SPRG_GEN_SCRATCH, \
 217        PACA_EXTLB, \
 218        EX_TLB_R10, \
 219        EX_TLB_R11
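/*
 * For example, EX_PARAMS(GEN) expands to
 *      EX_GEN, SPRN_SPRG_GEN_SCRATCH, PACA_EXGEN, EX_R10, EX_R11
 * supplying the type, scratch, paca_ex, ex_r10 and ex_r11 arguments of
 * kvm_handler above.
 */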
 220
 221kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \
 222        SPRN_CSRR0, SPRN_CSRR1, 0
 223kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \
 224        SPRN_MCSRR0, SPRN_MCSRR1, 0
 225kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \
 226        SPRN_SRR0, SPRN_SRR1,(NEED_EMU | NEED_DEAR | NEED_ESR)
 227kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \
 228        SPRN_SRR0, SPRN_SRR1, NEED_ESR
 229kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
 230        SPRN_SRR0, SPRN_SRR1, 0
 231kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
 232        SPRN_SRR0, SPRN_SRR1,(NEED_DEAR | NEED_ESR)
 233kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
 234        SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
 235kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
 236        SPRN_SRR0, SPRN_SRR1, 0
 237kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
 238        SPRN_SRR0, SPRN_SRR1, 0
 239kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \
 240        SPRN_SRR0, SPRN_SRR1, 0
 241kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \
 242        SPRN_SRR0, SPRN_SRR1, 0
 243kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT),\
 244        SPRN_CSRR0, SPRN_CSRR1, 0
 245/*
 246 * Only bolted TLB miss exception handlers are supported for now
 247 */
 248kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
 249        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 250kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
 251        SPRN_SRR0, SPRN_SRR1, 0
 252kvm_handler BOOKE_INTERRUPT_ALTIVEC_UNAVAIL, EX_PARAMS(GEN), \
 253        SPRN_SRR0, SPRN_SRR1, 0
 254kvm_handler BOOKE_INTERRUPT_ALTIVEC_ASSIST, EX_PARAMS(GEN), \
 255        SPRN_SRR0, SPRN_SRR1, 0
 256kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
 257        SPRN_SRR0, SPRN_SRR1, 0
 258kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \
 259        SPRN_SRR0, SPRN_SRR1, 0
 260kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \
 261        SPRN_CSRR0, SPRN_CSRR1, 0
 262kvm_handler BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \
 263        SPRN_SRR0, SPRN_SRR1, NEED_EMU
 264kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \
 265        SPRN_SRR0, SPRN_SRR1, 0
 266kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \
 267        SPRN_GSRR0, SPRN_GSRR1, 0
 268kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \
 269        SPRN_CSRR0, SPRN_CSRR1, 0
 270kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
 271        SPRN_DSRR0, SPRN_DSRR1, 0
 272kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
 273        SPRN_CSRR0, SPRN_CSRR1, 0
 274kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
 275        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 276#else
 277/*
 278 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 279 */
 280.macro kvm_handler intno srr0, srr1, flags
 281_GLOBAL(kvmppc_handler_\intno\()_\srr1)
 282        PPC_LL  r11, THREAD_KVM_VCPU(r10)
 283        PPC_STL r3, VCPU_GPR(R3)(r11)
 284        mfspr   r3, SPRN_SPRG_RSCRATCH0
 285        PPC_STL r4, VCPU_GPR(R4)(r11)
 286        PPC_LL  r4, THREAD_NORMSAVE(0)(r10)
 287        PPC_STL r5, VCPU_GPR(R5)(r11)
 288        PPC_STL r13, VCPU_CR(r11)
 289        mfspr   r5, \srr0
 290        PPC_STL r3, VCPU_GPR(R10)(r11)
 291        PPC_LL  r3, THREAD_NORMSAVE(2)(r10)
 292        PPC_STL r6, VCPU_GPR(R6)(r11)
 293        PPC_STL r4, VCPU_GPR(R11)(r11)
 294        mfspr   r6, \srr1
 295        PPC_STL r7, VCPU_GPR(R7)(r11)
 296        PPC_STL r8, VCPU_GPR(R8)(r11)
 297        PPC_STL r9, VCPU_GPR(R9)(r11)
 298        PPC_STL r3, VCPU_GPR(R13)(r11)
 299        mfctr   r7
 300        PPC_STL r12, VCPU_GPR(R12)(r11)
 301        PPC_STL r7, VCPU_CTR(r11)
 302        mr      r4, r11
 303        kvm_handler_common \intno, \srr0, \flags
 304.endm
 305
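/*
 * Same idea as kvm_handler above, but for the separately-stacked levels
 * (critical, machine check, debug): \scratch is expected to hold the guest
 * r8, r8 to point at the host save area holding the guest's r9-r11, and CR
 * to arrive in r9 (see kvm_booke_hv_asm.h).
 */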
 306.macro kvm_lvl_handler intno scratch srr0, srr1, flags
 307_GLOBAL(kvmppc_handler_\intno\()_\srr1)
 308        mfspr   r10, SPRN_SPRG_THREAD
 309        PPC_LL  r11, THREAD_KVM_VCPU(r10)
 310        PPC_STL r3, VCPU_GPR(R3)(r11)
 311        mfspr   r3, \scratch
 312        PPC_STL r4, VCPU_GPR(R4)(r11)
 313        PPC_LL  r4, GPR9(r8)
 314        PPC_STL r5, VCPU_GPR(R5)(r11)
 315        PPC_STL r9, VCPU_CR(r11)
 316        mfspr   r5, \srr0
 317        PPC_STL r3, VCPU_GPR(R8)(r11)
 318        PPC_LL  r3, GPR10(r8)
 319        PPC_STL r6, VCPU_GPR(R6)(r11)
 320        PPC_STL r4, VCPU_GPR(R9)(r11)
 321        mfspr   r6, \srr1
 322        PPC_LL  r4, GPR11(r8)
 323        PPC_STL r7, VCPU_GPR(R7)(r11)
 324        PPC_STL r3, VCPU_GPR(R10)(r11)
 325        mfctr   r7
 326        PPC_STL r12, VCPU_GPR(R12)(r11)
 327        PPC_STL r13, VCPU_GPR(R13)(r11)
 328        PPC_STL r4, VCPU_GPR(R11)(r11)
 329        PPC_STL r7, VCPU_CTR(r11)
 330        mr      r4, r11
 331        kvm_handler_common \intno, \srr0, \flags
 332.endm
 333
 334kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
 335        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
 336kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
 337        SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
 338kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
 339        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 340kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
 341kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
 342kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
 343        SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
 344kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, (NEED_ESR | NEED_EMU)
 345kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
 346kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
 347kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
 348kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
 349kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
 350kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
 351        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
 352kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
 353        SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
 354kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
 355kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
 356kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
 357kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
 358        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
 359kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
 360kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
 361kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
 362kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
 363        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
 364kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
 365        SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
 366kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
 367        SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
 368#endif
 369
 370/* Registers:
 371 *  SPRG_SCRATCH0: guest r10
 372 *  r4: vcpu pointer
 373 *  r11: vcpu->arch.shared
 374 *  r14: KVM exit number
 375 */
 376_GLOBAL(kvmppc_resume_host)
 377        /* Save remaining volatile guest register state to vcpu. */
 378        mfspr   r3, SPRN_VRSAVE
 379        PPC_STL r0, VCPU_GPR(R0)(r4)
 380        mflr    r5
 381        mfspr   r6, SPRN_SPRG4
 382        PPC_STL r5, VCPU_LR(r4)
 383        mfspr   r7, SPRN_SPRG5
 384        stw     r3, VCPU_VRSAVE(r4)
 385#ifdef CONFIG_64BIT
 386        PPC_LL  r3, PACA_SPRG_VDSO(r13)
 387#endif
 388        mfspr   r5, SPRN_SPRG9
 389        PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
 390        mfspr   r8, SPRN_SPRG6
 391        PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
 392        mfspr   r9, SPRN_SPRG7
 393#ifdef CONFIG_64BIT
 394        mtspr   SPRN_SPRG_VDSO_WRITE, r3
 395#endif
 396        PPC_STD(r5, VCPU_SPRG9, r4)
 397        PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
 398        mfxer   r3
 399        PPC_STD(r9, VCPU_SHARED_SPRG7, r11)
 400
 401        /* save guest MAS registers and restore host mas4 & mas6 */
 402        mfspr   r5, SPRN_MAS0
 403        PPC_STL r3, VCPU_XER(r4)
 404        mfspr   r6, SPRN_MAS1
 405        stw     r5, VCPU_SHARED_MAS0(r11)
 406        mfspr   r7, SPRN_MAS2
 407        stw     r6, VCPU_SHARED_MAS1(r11)
 408        PPC_STD(r7, VCPU_SHARED_MAS2, r11)
 409        mfspr   r5, SPRN_MAS3
 410        mfspr   r6, SPRN_MAS4
 411        stw     r5, VCPU_SHARED_MAS7_3+4(r11)
 412        mfspr   r7, SPRN_MAS6
 413        stw     r6, VCPU_SHARED_MAS4(r11)
 414        mfspr   r5, SPRN_MAS7
 415        lwz     r6, VCPU_HOST_MAS4(r4)
 416        stw     r7, VCPU_SHARED_MAS6(r11)
 417        lwz     r8, VCPU_HOST_MAS6(r4)
 418        mtspr   SPRN_MAS4, r6
 419        stw     r5, VCPU_SHARED_MAS7_3+0(r11)
 420        mtspr   SPRN_MAS6, r8
 421        /* Enable MAS register updates via exception */
 422        mfspr   r3, SPRN_EPCR
 423        rlwinm  r3, r3, 0, ~SPRN_EPCR_DMIUH
 424        mtspr   SPRN_EPCR, r3
 425        isync
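        /*
         * The guest's MAS state is now captured in the shared area and the
         * host's MAS4/MAS6 defaults are back in place, so with EPCR[DMIUH]
         * cleared above it is safe again for host exceptions to update the
         * MAS registers.
         */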
 426
 427#ifdef CONFIG_64BIT
 428        /*
 429         * We enter with interrupts disabled in hardware, but
 430         * we need to call RECONCILE_IRQ_STATE to ensure
 431         * that the software state is kept in sync.
 432         */
 433        RECONCILE_IRQ_STATE(r3,r5)
 434#endif
 435
 436        /* Switch to kernel stack and jump to handler. */
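        /*
         * Set up the C call: r3 = vcpu and r4 = exit number for
         * kvmppc_handle_exit(); the vcpu pointer is parked in non-volatile
         * r14 so it survives the call.
         */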
 437        mr      r3, r4
 438        mr      r5, r14 /* intno */
 439        mr      r14, r4 /* Save vcpu pointer. */
 440        mr      r4, r5
 441        bl      kvmppc_handle_exit
 442
 443        /* Restore vcpu pointer and the nonvolatiles we used. */
 444        mr      r4, r14
 445        PPC_LL  r14, VCPU_GPR(R14)(r4)
 446
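        /*
         * r3 holds the RESUME_* code returned by kvmppc_handle_exit():
         * RESUME_FLAG_NV means the guest non-volatiles were clobbered and
         * must be reloaded, RESUME_FLAG_HOST means exit to the host, in
         * which case the -ERR value encoded in the upper bits is recovered
         * by the srawi below.
         */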
 447        andi.   r5, r3, RESUME_FLAG_NV
 448        beq     skip_nv_load
 449        PPC_LL  r15, VCPU_GPR(R15)(r4)
 450        PPC_LL  r16, VCPU_GPR(R16)(r4)
 451        PPC_LL  r17, VCPU_GPR(R17)(r4)
 452        PPC_LL  r18, VCPU_GPR(R18)(r4)
 453        PPC_LL  r19, VCPU_GPR(R19)(r4)
 454        PPC_LL  r20, VCPU_GPR(R20)(r4)
 455        PPC_LL  r21, VCPU_GPR(R21)(r4)
 456        PPC_LL  r22, VCPU_GPR(R22)(r4)
 457        PPC_LL  r23, VCPU_GPR(R23)(r4)
 458        PPC_LL  r24, VCPU_GPR(R24)(r4)
 459        PPC_LL  r25, VCPU_GPR(R25)(r4)
 460        PPC_LL  r26, VCPU_GPR(R26)(r4)
 461        PPC_LL  r27, VCPU_GPR(R27)(r4)
 462        PPC_LL  r28, VCPU_GPR(R28)(r4)
 463        PPC_LL  r29, VCPU_GPR(R29)(r4)
 464        PPC_LL  r30, VCPU_GPR(R30)(r4)
 465        PPC_LL  r31, VCPU_GPR(R31)(r4)
 466skip_nv_load:
 467        /* Should we return to the guest? */
 468        andi.   r5, r3, RESUME_FLAG_HOST
 469        beq     lightweight_exit
 470
 471        srawi   r3, r3, 2 /* Shift -ERR back down. */
 472
 473heavyweight_exit:
 474        /* Not returning to guest. */
 475        PPC_LL  r5, HOST_STACK_LR(r1)
 476        lwz     r6, HOST_CR(r1)
 477
 478        /*
 479         * We already saved guest volatile register state; now save the
 480         * non-volatiles.
 481         */
 482
 483        PPC_STL r15, VCPU_GPR(R15)(r4)
 484        PPC_STL r16, VCPU_GPR(R16)(r4)
 485        PPC_STL r17, VCPU_GPR(R17)(r4)
 486        PPC_STL r18, VCPU_GPR(R18)(r4)
 487        PPC_STL r19, VCPU_GPR(R19)(r4)
 488        PPC_STL r20, VCPU_GPR(R20)(r4)
 489        PPC_STL r21, VCPU_GPR(R21)(r4)
 490        PPC_STL r22, VCPU_GPR(R22)(r4)
 491        PPC_STL r23, VCPU_GPR(R23)(r4)
 492        PPC_STL r24, VCPU_GPR(R24)(r4)
 493        PPC_STL r25, VCPU_GPR(R25)(r4)
 494        PPC_STL r26, VCPU_GPR(R26)(r4)
 495        PPC_STL r27, VCPU_GPR(R27)(r4)
 496        PPC_STL r28, VCPU_GPR(R28)(r4)
 497        PPC_STL r29, VCPU_GPR(R29)(r4)
 498        PPC_STL r30, VCPU_GPR(R30)(r4)
 499        PPC_STL r31, VCPU_GPR(R31)(r4)
 500
 501        /* Load host non-volatile register state from host stack. */
 502        PPC_LL  r14, HOST_NV_GPR(R14)(r1)
 503        PPC_LL  r15, HOST_NV_GPR(R15)(r1)
 504        PPC_LL  r16, HOST_NV_GPR(R16)(r1)
 505        PPC_LL  r17, HOST_NV_GPR(R17)(r1)
 506        PPC_LL  r18, HOST_NV_GPR(R18)(r1)
 507        PPC_LL  r19, HOST_NV_GPR(R19)(r1)
 508        PPC_LL  r20, HOST_NV_GPR(R20)(r1)
 509        PPC_LL  r21, HOST_NV_GPR(R21)(r1)
 510        PPC_LL  r22, HOST_NV_GPR(R22)(r1)
 511        PPC_LL  r23, HOST_NV_GPR(R23)(r1)
 512        PPC_LL  r24, HOST_NV_GPR(R24)(r1)
 513        PPC_LL  r25, HOST_NV_GPR(R25)(r1)
 514        PPC_LL  r26, HOST_NV_GPR(R26)(r1)
 515        PPC_LL  r27, HOST_NV_GPR(R27)(r1)
 516        PPC_LL  r28, HOST_NV_GPR(R28)(r1)
 517        PPC_LL  r29, HOST_NV_GPR(R29)(r1)
 518        PPC_LL  r30, HOST_NV_GPR(R30)(r1)
 519        PPC_LL  r31, HOST_NV_GPR(R31)(r1)
 520
 521        /* Return to kvm_vcpu_run(). */
 522        mtlr    r5
 523        mtcr    r6
 524        addi    r1, r1, HOST_STACK_SIZE
 525        /* r3 still contains the return code from kvmppc_handle_exit(). */
 526        blr
 527
 528/* Registers:
 529 *  r3: vcpu pointer
 530 */
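/*
 * Allocate the host stack frame described at the top of this file, save the
 * host non-volatile state into it, load the guest non-volatiles and fall
 * through into lightweight_exit to enter the guest.
 */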
 531_GLOBAL(__kvmppc_vcpu_run)
 532        stwu    r1, -HOST_STACK_SIZE(r1)
 533        PPC_STL r1, VCPU_HOST_STACK(r3) /* Save stack pointer to vcpu. */
 534
 535        /* Save host state to stack. */
 536        mr      r4, r3
 537        mflr    r3
 538        mfcr    r5
 539        PPC_STL r3, HOST_STACK_LR(r1)
 540
 541        stw     r5, HOST_CR(r1)
 542
 543        /* Save host non-volatile register state to stack. */
 544        PPC_STL r14, HOST_NV_GPR(R14)(r1)
 545        PPC_STL r15, HOST_NV_GPR(R15)(r1)
 546        PPC_STL r16, HOST_NV_GPR(R16)(r1)
 547        PPC_STL r17, HOST_NV_GPR(R17)(r1)
 548        PPC_STL r18, HOST_NV_GPR(R18)(r1)
 549        PPC_STL r19, HOST_NV_GPR(R19)(r1)
 550        PPC_STL r20, HOST_NV_GPR(R20)(r1)
 551        PPC_STL r21, HOST_NV_GPR(R21)(r1)
 552        PPC_STL r22, HOST_NV_GPR(R22)(r1)
 553        PPC_STL r23, HOST_NV_GPR(R23)(r1)
 554        PPC_STL r24, HOST_NV_GPR(R24)(r1)
 555        PPC_STL r25, HOST_NV_GPR(R25)(r1)
 556        PPC_STL r26, HOST_NV_GPR(R26)(r1)
 557        PPC_STL r27, HOST_NV_GPR(R27)(r1)
 558        PPC_STL r28, HOST_NV_GPR(R28)(r1)
 559        PPC_STL r29, HOST_NV_GPR(R29)(r1)
 560        PPC_STL r30, HOST_NV_GPR(R30)(r1)
 561        PPC_STL r31, HOST_NV_GPR(R31)(r1)
 562
 563        /* Load guest non-volatiles. */
 564        PPC_LL  r14, VCPU_GPR(R14)(r4)
 565        PPC_LL  r15, VCPU_GPR(R15)(r4)
 566        PPC_LL  r16, VCPU_GPR(R16)(r4)
 567        PPC_LL  r17, VCPU_GPR(R17)(r4)
 568        PPC_LL  r18, VCPU_GPR(R18)(r4)
 569        PPC_LL  r19, VCPU_GPR(R19)(r4)
 570        PPC_LL  r20, VCPU_GPR(R20)(r4)
 571        PPC_LL  r21, VCPU_GPR(R21)(r4)
 572        PPC_LL  r22, VCPU_GPR(R22)(r4)
 573        PPC_LL  r23, VCPU_GPR(R23)(r4)
 574        PPC_LL  r24, VCPU_GPR(R24)(r4)
 575        PPC_LL  r25, VCPU_GPR(R25)(r4)
 576        PPC_LL  r26, VCPU_GPR(R26)(r4)
 577        PPC_LL  r27, VCPU_GPR(R27)(r4)
 578        PPC_LL  r28, VCPU_GPR(R28)(r4)
 579        PPC_LL  r29, VCPU_GPR(R29)(r4)
 580        PPC_LL  r30, VCPU_GPR(R30)(r4)
 581        PPC_LL  r31, VCPU_GPR(R31)(r4)
 582
 583
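/*
 * lightweight_exit: reached from __kvmppc_vcpu_run above and whenever
 * kvmppc_handle_exit() sends us straight back into the guest.  Restores
 * guest PID, MAS and SPRG state, then the remaining volatiles, and finally
 * rfi's into the guest.
 */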
 584lightweight_exit:
 585        PPC_STL r2, HOST_R2(r1)
 586
 587        mfspr   r3, SPRN_PID
 588        stw     r3, VCPU_HOST_PID(r4)
 589        lwz     r3, VCPU_GUEST_PID(r4)
 590        mtspr   SPRN_PID, r3
 591
 592        PPC_LL  r11, VCPU_SHARED(r4)
 593        /* Disable MAS register updates via exception */
 594        mfspr   r3, SPRN_EPCR
 595        oris    r3, r3, SPRN_EPCR_DMIUH@h
 596        mtspr   SPRN_EPCR, r3
 597        isync
 598        /* Save host mas4 and mas6 and load guest MAS registers */
 599        mfspr   r3, SPRN_MAS4
 600        stw     r3, VCPU_HOST_MAS4(r4)
 601        mfspr   r3, SPRN_MAS6
 602        stw     r3, VCPU_HOST_MAS6(r4)
 603        lwz     r3, VCPU_SHARED_MAS0(r11)
 604        lwz     r5, VCPU_SHARED_MAS1(r11)
 605        PPC_LD(r6, VCPU_SHARED_MAS2, r11)
 606        lwz     r7, VCPU_SHARED_MAS7_3+4(r11)
 607        lwz     r8, VCPU_SHARED_MAS4(r11)
 608        mtspr   SPRN_MAS0, r3
 609        mtspr   SPRN_MAS1, r5
 610        mtspr   SPRN_MAS2, r6
 611        mtspr   SPRN_MAS3, r7
 612        mtspr   SPRN_MAS4, r8
 613        lwz     r3, VCPU_SHARED_MAS6(r11)
 614        lwz     r5, VCPU_SHARED_MAS7_3+0(r11)
 615        mtspr   SPRN_MAS6, r3
 616        mtspr   SPRN_MAS7, r5
 617
 618        /*
 619         * Host interrupt handlers may have clobbered these guest-readable
 620         * SPRGs, so we need to reload them here with the guest's values.
 621         */
 622        lwz     r3, VCPU_VRSAVE(r4)
 623        PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
 624        mtspr   SPRN_VRSAVE, r3
 625        PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
 626        mtspr   SPRN_SPRG4W, r5
 627        PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
 628        mtspr   SPRN_SPRG5W, r6
 629        PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
 630        mtspr   SPRN_SPRG6W, r7
 631        PPC_LD(r5, VCPU_SPRG9, r4)
 632        mtspr   SPRN_SPRG7W, r8
 633        mtspr   SPRN_SPRG9, r5
 634
 635        /* Load some guest volatiles. */
 636        PPC_LL  r3, VCPU_LR(r4)
 637        PPC_LL  r5, VCPU_XER(r4)
 638        PPC_LL  r6, VCPU_CTR(r4)
 639        PPC_LL  r7, VCPU_CR(r4)
 640        PPC_LL  r8, VCPU_PC(r4)
 641        PPC_LD(r9, VCPU_SHARED_MSR, r11)
 642        PPC_LL  r0, VCPU_GPR(R0)(r4)
 643        PPC_LL  r1, VCPU_GPR(R1)(r4)
 644        PPC_LL  r2, VCPU_GPR(R2)(r4)
 645        PPC_LL  r10, VCPU_GPR(R10)(r4)
 646        PPC_LL  r11, VCPU_GPR(R11)(r4)
 647        PPC_LL  r12, VCPU_GPR(R12)(r4)
 648        PPC_LL  r13, VCPU_GPR(R13)(r4)
 649        mtlr    r3
 650        mtxer   r5
 651        mtctr   r6
 652        mtsrr0  r8
 653        mtsrr1  r9
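        /*
         * SRR0/SRR1 now hold the guest PC and MSR; the rfi at the end of
         * this path switches to them.
         */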
 654
 655#ifdef CONFIG_KVM_EXIT_TIMING
 656        /* save enter time */
 6571:
 658        mfspr   r6, SPRN_TBRU
 659        mfspr   r9, SPRN_TBRL
 660        mfspr   r8, SPRN_TBRU
 661        cmpw    r8, r6
 662        stw     r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
 663        bne     1b
 664        stw     r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
 665#endif
 666
 667        /*
  668         * Don't execute any instruction that can change CR after
  669         * the mtcr instruction below.
 670         */
 671        mtcr    r7
 672
 673        /* Finish loading guest volatiles and jump to guest. */
 674        PPC_LL  r5, VCPU_GPR(R5)(r4)
 675        PPC_LL  r6, VCPU_GPR(R6)(r4)
 676        PPC_LL  r7, VCPU_GPR(R7)(r4)
 677        PPC_LL  r8, VCPU_GPR(R8)(r4)
 678        PPC_LL  r9, VCPU_GPR(R9)(r4)
 679
 680        PPC_LL  r3, VCPU_GPR(R3)(r4)
 681        PPC_LL  r4, VCPU_GPR(R4)(r4)
 682        rfi
 683