/* linux/arch/powerpc/kvm/book3s_rmhandlers.S */
   1/*
   2 * This program is free software; you can redistribute it and/or modify
   3 * it under the terms of the GNU General Public License, version 2, as
   4 * published by the Free Software Foundation.
   5 *
   6 * This program is distributed in the hope that it will be useful,
   7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
   8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   9 * GNU General Public License for more details.
  10 *
  11 * You should have received a copy of the GNU General Public License
  12 * along with this program; if not, write to the Free Software
  13 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
  14 *
  15 * Copyright SUSE Linux Products GmbH 2009
  16 *
  17 * Authors: Alexander Graf <agraf@suse.de>
  18 */
  19
  20#include <asm/ppc_asm.h>
  21#include <asm/kvm_asm.h>
  22#include <asm/reg.h>
  23#include <asm/mmu.h>
  24#include <asm/page.h>
  25#include <asm/asm-offsets.h>
  26
  27#ifdef CONFIG_PPC_BOOK3S_64
  28#include <asm/exception-64s.h>
  29#endif
  30
  31/*****************************************************************************
  32 *                                                                           *
  33 *        Real Mode handlers that need to be in low physical memory          *
  34 *                                                                           *
  35 ****************************************************************************/
  36
   37#if defined(CONFIG_PPC_BOOK3S_64)
   38
/* On 64-bit, call C functions through their dot-symbol text entry point */
   39#define FUNC(name)              GLUE(.,name)
   40
/*
 * Entry used when an interrupt should simply skip the instruction that
 * trapped: advance SRR0 by one instruction (4 bytes) and return straight
 * to the interrupted context, leaving all other state untouched.
 */
   41        .globl  kvmppc_skip_interrupt
   42kvmppc_skip_interrupt:
   43        /*
   44         * Here all GPRs are unchanged from when the interrupt happened
   45         * except for r13, which is saved in SPRG_SCRATCH0.
   46         */
   47        mfspr   r13, SPRN_SRR0                  /* r13 = interrupted PC */
   48        addi    r13, r13, 4                     /* step over the trapping insn */
   49        mtspr   SPRN_SRR0, r13
   50        GET_SCRATCH0(r13)                       /* restore the original r13 */
   51        rfid                                    /* return via SRR0/SRR1 */
   52        b       .                               /* not reached */
  53
/*
 * Same as kvmppc_skip_interrupt, but for hypervisor-class interrupts,
 * which use HSRR0/HSRR1 and are returned from with hrfid.
 */
   54        .globl  kvmppc_skip_Hinterrupt
   55kvmppc_skip_Hinterrupt:
   56        /*
   57         * Here all GPRs are unchanged from when the interrupt happened
   58         * except for r13, which is saved in SPRG_SCRATCH0.
   59         */
   60        mfspr   r13, SPRN_HSRR0                 /* r13 = interrupted PC */
   61        addi    r13, r13, 4                     /* step over the trapping insn */
   62        mtspr   SPRN_HSRR0, r13
   63        GET_SCRATCH0(r13)                       /* restore the original r13 */
   64        hrfid                                   /* return via HSRR0/HSRR1 */
   65        b       .                               /* not reached */
  66
   67#elif defined(CONFIG_PPC_BOOK3S_32)
   68
/* 32-bit has no function descriptors; call C functions by plain name */
   69#define FUNC(name)              name
   70
/*
 * INTERRUPT_TRAMPOLINE - first-level KVM hook for exception vector \intno
 *
 * Runs with only r12/r13 as scratch (both preserved via SPRG scratch
 * registers) and decides whether the exception interrupted a KVM guest
 * or ordinary Linux:
 *   - no shadow vcpu, or HSTATE_IN_GUEST == KVM_GUEST_MODE_NONE:
 *       restore r12/r13/CR and branch to the original Linux handler
 *       (kvmppc_resume_\intno)
 *   - KVM_GUEST_MODE_SKIP: branch to kvmppc_handler_skip_ins
 *   - otherwise: stash guest CR/r12 in the shadow vcpu, put the vector
 *       number in r12 and branch to kvmppc_handler_trampoline_exit
 */
.macro INTERRUPT_TRAMPOLINE intno

.global kvmppc_trampoline_\intno
kvmppc_trampoline_\intno:

        mtspr   SPRN_SPRG_SCRATCH0, r13         /* Save r13 */

        /*
         * First thing to do is to find out if we're coming
         * from a KVM guest or a Linux process.
         *
         * To distinguish, we check a magic byte in the PACA/current
         */
        mfspr   r13, SPRN_SPRG_THREAD           /* r13 -> current thread struct */
        lwz     r13, THREAD_KVM_SVCPU(r13)      /* r13 = shadow vcpu pointer */
        /* PPC32 can have a NULL pointer - let's check for that */
        mtspr   SPRN_SPRG_SCRATCH1, r12         /* Save r12 */
        mfcr    r12                             /* r12 = CR (cmpwi clobbers it) */
        cmpwi   r13, 0
        bne     1f
2:      mtcr    r12                             /* r12 must hold saved CR here */
        mfspr   r12, SPRN_SPRG_SCRATCH1
        mfspr   r13, SPRN_SPRG_SCRATCH0         /* r13 = original r13 */
        b       kvmppc_resume_\intno            /* Get back original handler */

1:      tophys(r13, r13)                        /* svcpu virt -> phys address */
        stw     r12, HSTATE_SCRATCH1(r13)       /* stash guest CR */
        mfspr   r12, SPRN_SPRG_SCRATCH1
        stw     r12, HSTATE_SCRATCH0(r13)       /* stash guest r12 */
        lbz     r12, HSTATE_IN_GUEST(r13)
        cmpwi   r12, KVM_GUEST_MODE_NONE
        bne     ..kvmppc_handler_hasmagic_\intno
        /* No KVM guest? Then jump back to the Linux handler! */
        lwz     r12, HSTATE_SCRATCH1(r13)       /* reload saved CR for 2: */
        b       2b

        /* Now we know we're handling a KVM guest */
..kvmppc_handler_hasmagic_\intno:

        /* Should we just skip the faulting instruction? */
        cmpwi   r12, KVM_GUEST_MODE_SKIP
        beq     kvmppc_handler_skip_ins

        /* Let's store which interrupt we're handling */
        li      r12, \intno

        /* Jump into the SLB exit code that goes to the highmem handler */
        b       kvmppc_handler_trampoline_exit

.endm
 121
/* Instantiate one real-mode trampoline per exception vector KVM hooks */
  122INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_SYSTEM_RESET
  123INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_MACHINE_CHECK
  124INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_DATA_STORAGE
  125INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_INST_STORAGE
  126INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_EXTERNAL
  127INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_ALIGNMENT
  128INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_PROGRAM
  129INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_FP_UNAVAIL
  130INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_DECREMENTER
  131INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_SYSCALL
  132INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_TRACE
  133INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_PERFMON
  134INTERRUPT_TRAMPOLINE    BOOK3S_INTERRUPT_ALTIVEC
 135
  136/*
  137 * Bring us back to the faulting code, but skip the
  138 * faulting instruction.
  139 *
  140 * This is a generic exit path from the interrupt
  141 * trampolines above.
  142 *
  143 * Input Registers:
  144 *
  145 * R12            = free
  146 * R13            = Shadow VCPU (PACA)
  147 * HSTATE.SCRATCH0 = guest R12
  148 * HSTATE.SCRATCH1 = guest CR
  149 * SPRG_SCRATCH0  = guest R13
  150 *
  151 */
kvmppc_handler_skip_ins:

        /* Patch the IP to the next instruction */
        mfsrr0  r12
        addi    r12, r12, 4                     /* one instruction = 4 bytes */
        mtsrr0  r12

        /* Clean up all state */
        lwz     r12, HSTATE_SCRATCH1(r13)
        mtcr    r12                             /* restore guest CR */
        PPC_LL  r12, HSTATE_SCRATCH0(r13)       /* restore guest r12 */
        GET_SCRATCH0(r13)                       /* restore guest r13 */

        /* And get back into the code */
        RFI                                     /* resume past the skipped insn */
#endif
 168
  169/*
  170 * Call kvmppc_handler_trampoline_enter in real mode
  171 *
  172 * On entry, r4 contains the guest shadow MSR
  173 * MSR.EE has to be 0 when calling this function
  174 */
_GLOBAL(kvmppc_entry_trampoline)
        mfmsr   r5                              /* r5 = current host MSR */
        LOAD_REG_ADDR(r7, kvmppc_handler_trampoline_enter)
        toreal(r7)                              /* virt -> real address of target */

        li      r6, MSR_IR | MSR_DR
        andc    r6, r5, r6      /* Clear DR and IR in MSR value */
        /*
         * Set EE in HOST_MSR so that it's enabled when we get into our
         * C exit handler function
         */
        ori     r5, r5, MSR_EE                  /* r5 handed to the trampoline;
                                                 * presumably consumed there —
                                                 * confirm in book3s_segment.S */
        mtsrr0  r7                              /* RFI target */
        mtsrr1  r6                              /* MSR with IR/DR off */
        RFI                                     /* enter trampoline in real mode */
  191#if defined(CONFIG_PPC_BOOK3S_32)
/* LR save slot just above the INT_FRAME allocated by define_load_up() */
  192#define STACK_LR        INT_FRAME_SIZE+4
  193
  194/* load_up_xxx have to run with MSR_DR=0 on Book3S_32 */
/*
 * MSR_EXT_START: park r20 in the frame's _NIP slot (used purely as a
 * scratch save area here), keep the old MSR in r20, then clear MSR_DR
 * and MSR_EE.  Must be paired with MSR_EXT_END, which relies on r20
 * still holding the saved MSR.
 */
  195#define MSR_EXT_START                                           \
  196        PPC_STL r20, _NIP(r1);                                  \
  197        mfmsr   r20;                                            \
  198        LOAD_REG_IMMEDIATE(r3, MSR_DR|MSR_EE);                  \
  199        andc    r3,r20,r3;              /* Disable DR,EE */     \
  200        mtmsr   r3;                                             \
  201        sync
  202
/* MSR_EXT_END: restore the MSR saved in r20, then reload r20 itself */
  203#define MSR_EXT_END                                             \
  204        mtmsr   r20;                    /* Enable DR,EE */      \
  205        sync;                                                   \
  206        PPC_LL  r20, _NIP(r1)
  207
  208#elif defined(CONFIG_PPC_BOOK3S_64)
/* 64-bit: no MSR dance needed, and LR goes in the standard _LINK slot */
  209#define STACK_LR        _LINK
  210#define MSR_EXT_START
  211#define MSR_EXT_END
  212#endif
 213
  214/*
  215 * Activate current's external feature (FPU/Altivec/VSX)
  216 */
/*
 * define_load_up(what) emits kvmppc_load_up_<what>: allocate an
 * interrupt-sized stack frame, save LR, (32-bit only) disable DR/EE via
 * MSR_EXT_START, call the kernel's load_up_<what>, then undo all of the
 * above and return.  Clobbers whatever load_up_<what> clobbers.
 */
  217#define define_load_up(what)                                    \
  218                                                                \
  219_GLOBAL(kvmppc_load_up_ ## what);                               \
  220        PPC_STLU r1, -INT_FRAME_SIZE(r1);                       \
  221        mflr    r3;                                             \
  222        PPC_STL r3, STACK_LR(r1);                               \
  223        MSR_EXT_START;                                          \
  224                                                                \
  225        bl      FUNC(load_up_ ## what);                         \
  226                                                                \
  227        MSR_EXT_END;                                            \
  228        PPC_LL  r3, STACK_LR(r1);                               \
  229        mtlr    r3;                                             \
  230        addi    r1, r1, INT_FRAME_SIZE;                         \
  231        blr
 232
/* Emit kvmppc_load_up_fpu and, when configured, kvmppc_load_up_altivec */
  233define_load_up(fpu)
  234#ifdef CONFIG_ALTIVEC
  235define_load_up(altivec)
  236#endif
  237
/* NOTE(review): included here so book3s_segment.S picks up the FUNC()
 * definition selected above — confirm it has no other ordering needs */
  238#include "book3s_segment.S"
 239