/* linux/arch/powerpc/kvm/book3s_interrupts.S */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */
  19
  20#include <asm/ppc_asm.h>
  21#include <asm/kvm_asm.h>
  22#include <asm/reg.h>
  23#include <asm/page.h>
  24#include <asm/asm-offsets.h>
  25#include <asm/exception-64s.h>
  26
  27#if defined(CONFIG_PPC_BOOK3S_64)
  28#define FUNC(name)              GLUE(.,name)
  29#elif defined(CONFIG_PPC_BOOK3S_32)
  30#define FUNC(name)              name
  31#endif /* CONFIG_PPC_BOOK3S_XX */
  32
  33#define VCPU_LOAD_NVGPRS(vcpu) \
  34        PPC_LL  r14, VCPU_GPR(R14)(vcpu); \
  35        PPC_LL  r15, VCPU_GPR(R15)(vcpu); \
  36        PPC_LL  r16, VCPU_GPR(R16)(vcpu); \
  37        PPC_LL  r17, VCPU_GPR(R17)(vcpu); \
  38        PPC_LL  r18, VCPU_GPR(R18)(vcpu); \
  39        PPC_LL  r19, VCPU_GPR(R19)(vcpu); \
  40        PPC_LL  r20, VCPU_GPR(R20)(vcpu); \
  41        PPC_LL  r21, VCPU_GPR(R21)(vcpu); \
  42        PPC_LL  r22, VCPU_GPR(R22)(vcpu); \
  43        PPC_LL  r23, VCPU_GPR(R23)(vcpu); \
  44        PPC_LL  r24, VCPU_GPR(R24)(vcpu); \
  45        PPC_LL  r25, VCPU_GPR(R25)(vcpu); \
  46        PPC_LL  r26, VCPU_GPR(R26)(vcpu); \
  47        PPC_LL  r27, VCPU_GPR(R27)(vcpu); \
  48        PPC_LL  r28, VCPU_GPR(R28)(vcpu); \
  49        PPC_LL  r29, VCPU_GPR(R29)(vcpu); \
  50        PPC_LL  r30, VCPU_GPR(R30)(vcpu); \
  51        PPC_LL  r31, VCPU_GPR(R31)(vcpu); \
  52
/*****************************************************************************
 *                                                                           *
 *     Guest entry / exit code that is in kernel module memory (highmem)     *
 *                                                                           *
 ****************************************************************************/
  58
  59/* Registers:
  60 *  r3: kvm_run pointer
  61 *  r4: vcpu pointer
  62 */
  63_GLOBAL(__kvmppc_vcpu_run)
  64
  65kvm_start_entry:
  66        /* Write correct stack frame */
  67        mflr    r0
  68        PPC_STL r0,PPC_LR_STKOFF(r1)
  69
  70        /* Save host state to the stack */
  71        PPC_STLU r1, -SWITCH_FRAME_SIZE(r1)
  72
  73        /* Save r3 (kvm_run) and r4 (vcpu) */
  74        SAVE_2GPRS(3, r1)
  75
  76        /* Save non-volatile registers (r14 - r31) */
  77        SAVE_NVGPRS(r1)
  78
  79        /* Save CR */
  80        mfcr    r14
  81        stw     r14, _CCR(r1)
  82
  83        /* Save LR */
  84        PPC_STL r0, _LINK(r1)
  85
  86        /* Load non-volatile guest state from the vcpu */
  87        VCPU_LOAD_NVGPRS(r4)
  88
  89kvm_start_lightweight:
  90
  91#ifdef CONFIG_PPC_BOOK3S_64
  92        PPC_LL  r3, VCPU_HFLAGS(r4)
  93        rldicl  r3, r3, 0, 63           /* r3 &= 1 */
  94        stb     r3, HSTATE_RESTORE_HID5(r13)
  95#endif /* CONFIG_PPC_BOOK3S_64 */
  96
  97        PPC_LL  r4, VCPU_SHADOW_MSR(r4) /* get shadow_msr */
  98
  99        /* Jump to segment patching handler and into our guest */
 100        bl      FUNC(kvmppc_entry_trampoline)
 101        nop
 102
 103/*
 104 * This is the handler in module memory. It gets jumped at from the
 105 * lowmem trampoline code, so it's basically the guest exit code.
 106 *
 107 */
 108
 109.global kvmppc_handler_highmem
 110kvmppc_handler_highmem:
 111
 112        /*
 113         * Register usage at this point:
 114         *
 115         * R1       = host R1
 116         * R2       = host R2
 117         * R12      = exit handler id
 118         * R13      = PACA
 119         * SVCPU.*  = guest *
 120         *
 121         */
 122
 123        /* R7 = vcpu */
 124        PPC_LL  r7, GPR4(r1)
 125
 126        PPC_STL r14, VCPU_GPR(R14)(r7)
 127        PPC_STL r15, VCPU_GPR(R15)(r7)
 128        PPC_STL r16, VCPU_GPR(R16)(r7)
 129        PPC_STL r17, VCPU_GPR(R17)(r7)
 130        PPC_STL r18, VCPU_GPR(R18)(r7)
 131        PPC_STL r19, VCPU_GPR(R19)(r7)
 132        PPC_STL r20, VCPU_GPR(R20)(r7)
 133        PPC_STL r21, VCPU_GPR(R21)(r7)
 134        PPC_STL r22, VCPU_GPR(R22)(r7)
 135        PPC_STL r23, VCPU_GPR(R23)(r7)
 136        PPC_STL r24, VCPU_GPR(R24)(r7)
 137        PPC_STL r25, VCPU_GPR(R25)(r7)
 138        PPC_STL r26, VCPU_GPR(R26)(r7)
 139        PPC_STL r27, VCPU_GPR(R27)(r7)
 140        PPC_STL r28, VCPU_GPR(R28)(r7)
 141        PPC_STL r29, VCPU_GPR(R29)(r7)
 142        PPC_STL r30, VCPU_GPR(R30)(r7)
 143        PPC_STL r31, VCPU_GPR(R31)(r7)
 144
 145        /* Pass the exit number as 3rd argument to kvmppc_handle_exit */
 146        mr      r5, r12
 147
 148        /* Restore r3 (kvm_run) and r4 (vcpu) */
 149        REST_2GPRS(3, r1)
 150        bl      FUNC(kvmppc_handle_exit)
 151
 152        /* If RESUME_GUEST, get back in the loop */
 153        cmpwi   r3, RESUME_GUEST
 154        beq     kvm_loop_lightweight
 155
 156        cmpwi   r3, RESUME_GUEST_NV
 157        beq     kvm_loop_heavyweight
 158
 159kvm_exit_loop:
 160
 161        PPC_LL  r4, _LINK(r1)
 162        mtlr    r4
 163
 164        lwz     r14, _CCR(r1)
 165        mtcr    r14
 166
 167        /* Restore non-volatile host registers (r14 - r31) */
 168        REST_NVGPRS(r1)
 169
 170        addi    r1, r1, SWITCH_FRAME_SIZE
 171        blr
 172
 173kvm_loop_heavyweight:
 174
 175        PPC_LL  r4, _LINK(r1)
 176        PPC_STL r4, (PPC_LR_STKOFF + SWITCH_FRAME_SIZE)(r1)
 177
 178        /* Load vcpu and cpu_run */
 179        REST_2GPRS(3, r1)
 180
 181        /* Load non-volatile guest state from the vcpu */
 182        VCPU_LOAD_NVGPRS(r4)
 183
 184        /* Jump back into the beginning of this function */
 185        b       kvm_start_lightweight
 186
 187kvm_loop_lightweight:
 188
 189        /* We'll need the vcpu pointer */
 190        REST_GPR(4, r1)
 191
 192        /* Jump back into the beginning of this function */
 193        b       kvm_start_lightweight
 194