/* linux/arch/powerpc/kvm/book3s_segment.S */
/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2010
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

/* Real mode helpers */

/*
 * GET_SHADOW_VCPU(reg) - load the real-mode address of the shadow vcpu
 * into 'reg'.  Both trampolines below run with translation off
 * (MSR = ~IR|DR, see the entry-state comment), hence the physical
 * addressing.
 */
#if defined(CONFIG_PPC_BOOK3S_64)

/* 64-bit: r13 is the PACA, which doubles as the shadow vcpu state. */
#define GET_SHADOW_VCPU(reg)	\
	mr	reg, r13

#elif defined(CONFIG_PPC_BOOK3S_32)

/*
 * 32-bit: fetch the shadow vcpu pointer from the thread struct hanging
 * off r2 (presumably 'current' here -- TODO confirm against the caller),
 * converting both the base and the fetched pointer with tophys() since
 * the MMU is off.
 */
#define GET_SHADOW_VCPU(reg)				\
	tophys(reg, r2);				\
	lwz	reg, (THREAD + THREAD_KVM_SVCPU)(reg);	\
	tophys(reg, reg)

#endif

/* Disable for nested KVM */
#define USE_QUICK_LAST_INST
  38
  39
  40/* Get helper functions for subarch specific functionality */
  41
  42#if defined(CONFIG_PPC_BOOK3S_64)
  43#include "book3s_64_slb.S"
  44#elif defined(CONFIG_PPC_BOOK3S_32)
  45#include "book3s_32_sr.S"
  46#endif
  47
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_enter
kvmppc_handler_trampoline_enter:

	/* Required state:
	 *
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = host R2
	 * R10 = guest MSR
	 * all other volatile GPRS = free
	 * SVCPU[CR] = guest CR
	 * SVCPU[XER] = guest XER
	 * SVCPU[CTR] = guest CTR
	 * SVCPU[LR] = guest LR
	 */

	/* r3 = shadow vcpu */
	GET_SHADOW_VCPU(r3)

	/* Save R1/R2 in the PACA (64-bit) or shadow_vcpu (32-bit) so the
	 * exit path can restore a usable host stack/TOC before anything
	 * else. */
	PPC_STL	r1, HSTATE_HOST_R1(r3)
	PPC_STL	r2, HSTATE_HOST_R2(r3)

	/* Move SRR0 and SRR1 into the respective regs:
	 * SRR0 = guest PC (from the svcpu), SRR1 = guest MSR (still in
	 * r10).  The RFI below switches to both atomically. */
	PPC_LL	r9, SVCPU_PC(r3)
	mtsrr0	r9
	mtsrr1	r10

	/* Activate guest mode, so faults get handled by KVM */
	li	r11, KVM_GUEST_MODE_GUEST
	stb	r11, HSTATE_IN_GUEST(r3)

	/* Switch to guest segment. This is subarch specific. */
	LOAD_GUEST_SEGMENTS

	/* Enter guest */

	/* Restore the guest's CTR/LR/CR/XER from the svcpu ... */
	PPC_LL	r4, SVCPU_CTR(r3)
	PPC_LL	r5, SVCPU_LR(r3)
	lwz	r6, SVCPU_CR(r3)
	lwz	r7, SVCPU_XER(r3)

	mtctr	r4
	mtlr	r5
	mtcr	r6
	mtxer	r7

	/* ... then the GPRs.  r3 is the svcpu pointer, so it must be
	 * reloaded last, after every other register that needs it as a
	 * base. */
	PPC_LL	r0, SVCPU_R0(r3)
	PPC_LL	r1, SVCPU_R1(r3)
	PPC_LL	r2, SVCPU_R2(r3)
	PPC_LL	r4, SVCPU_R4(r3)
	PPC_LL	r5, SVCPU_R5(r3)
	PPC_LL	r6, SVCPU_R6(r3)
	PPC_LL	r7, SVCPU_R7(r3)
	PPC_LL	r8, SVCPU_R8(r3)
	PPC_LL	r9, SVCPU_R9(r3)
	PPC_LL	r10, SVCPU_R10(r3)
	PPC_LL	r11, SVCPU_R11(r3)
	PPC_LL	r12, SVCPU_R12(r3)
	PPC_LL	r13, SVCPU_R13(r3)

	PPC_LL	r3, (SVCPU_R3)(r3)

	/* Jump into the guest: PC from SRR0, MSR from SRR1. */
	RFI
kvmppc_handler_trampoline_enter_end:
 120
 121
 122
/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_handler_trampoline_exit
kvmppc_handler_trampoline_exit:

.global kvmppc_interrupt
kvmppc_interrupt:

	/* Register usage at this point:
	 *
	 * SPRG_SCRATCH0  = guest R13
	 * R12            = exit handler id
	 * R13            = shadow vcpu (32-bit) or PACA (64-bit)
	 * HSTATE.SCRATCH0 = guest R12
	 * HSTATE.SCRATCH1 = guest CR
	 *
	 */

	/* Save registers: guest r0-r11 go straight into the svcpu.
	 * (r12/r13/CR were stashed in scratch space by the interrupt
	 * vector and are recovered further down.) */

	PPC_STL	r0, SVCPU_R0(r13)
	PPC_STL	r1, SVCPU_R1(r13)
	PPC_STL	r2, SVCPU_R2(r13)
	PPC_STL	r3, SVCPU_R3(r13)
	PPC_STL	r4, SVCPU_R4(r13)
	PPC_STL	r5, SVCPU_R5(r13)
	PPC_STL	r6, SVCPU_R6(r13)
	PPC_STL	r7, SVCPU_R7(r13)
	PPC_STL	r8, SVCPU_R8(r13)
	PPC_STL	r9, SVCPU_R9(r13)
	PPC_STL	r10, SVCPU_R10(r13)
	PPC_STL	r11, SVCPU_R11(r13)

	/* Restore R1/R2 so we can handle faults */
	PPC_LL	r1, HSTATE_HOST_R1(r13)
	PPC_LL	r2, HSTATE_HOST_R2(r13)

	/* Save guest PC and MSR */
#ifdef CONFIG_PPC64
BEGIN_FTR_SECTION
	/* On HV-capable CPUs, exit ids with bit 0x2 set arrived via a
	 * hypervisor interrupt vector: the guest PC/MSR are then in
	 * HSRR0/HSRR1 rather than SRR0/SRR1.  Mask with 0x3ffd to clear
	 * that 0x2 bit, mapping the id back to its non-HV equivalent. */
	andi.	r0,r12,0x2
	beq	1f
	mfspr	r3,SPRN_HSRR0
	mfspr	r4,SPRN_HSRR1
	andi.	r12,r12,0x3ffd
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_HVMODE)
#endif
1:	mfsrr0	r3
	mfsrr1	r4
2:
	PPC_STL	r3, SVCPU_PC(r13)
	PPC_STL	r4, SVCPU_SHADOW_SRR1(r13)

	/* Get scratch'ed off registers: guest r13, r12 and CR */
	GET_SCRATCH0(r9)
	PPC_LL	r8, HSTATE_SCRATCH0(r13)
	lwz	r7, HSTATE_SCRATCH1(r13)

	PPC_STL	r9, SVCPU_R13(r13)
	PPC_STL	r8, SVCPU_R12(r13)
	stw	r7, SVCPU_CR(r13)

	/* Save more register state: XER, fault DAR/DSISR, CTR, LR */

	mfxer	r5
	mfdar	r6
	mfdsisr	r7
	mfctr	r8
	mflr	r9

	stw	r5, SVCPU_XER(r13)
	PPC_STL	r6, SVCPU_FAULT_DAR(r13)
	stw	r7, SVCPU_FAULT_DSISR(r13)
	PPC_STL	r8, SVCPU_CTR(r13)
	PPC_STL	r9, SVCPU_LR(r13)

	/*
	 * In order for us to easily get the last instruction,
	 * we got the #vmexit at, we exploit the fact that the
	 * virtual layout is still the same here, so we can just
	 * ld from the guest's PC address
	 */

	/* We only load the last instruction when it's safe, i.e. for the
	 * exit reasons whose handlers actually need it */
	cmpwi	r12, BOOK3S_INTERRUPT_DATA_STORAGE
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_PROGRAM
	beq	ld_last_inst
	cmpwi	r12, BOOK3S_INTERRUPT_ALIGNMENT
	beq-	ld_last_inst

	b	no_ld_last_inst

ld_last_inst:
	/* Save off the guest instruction we're at.
	 * r3 still holds the guest PC saved above. */

	/* In case lwz faults */
	li	r0, KVM_INST_FETCH_FAILED

#ifdef USE_QUICK_LAST_INST

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r9, KVM_GUEST_MODE_SKIP
	stb	r9, HSTATE_IN_GUEST(r13)

	/*    1) enable paging for data */
	mfmsr	r9
	ori	r11, r9, MSR_DR			/* Enable paging for data */
	mtmsr	r11
	sync
	/*    2) fetch the instruction through the guest mapping */
	lwz	r0, 0(r3)
	/*    3) disable paging again */
	mtmsr	r9
	sync

#endif
	/* Without USE_QUICK_LAST_INST -- or if the lwz above faulted --
	 * this stores KVM_INST_FETCH_FAILED, so the host knows to fetch
	 * the instruction by other means. */
	stw	r0, SVCPU_LAST_INST(r13)

no_ld_last_inst:

	/* Unset guest mode */
	li	r9, KVM_GUEST_MODE_NONE
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Switch back to host MMU */
	LOAD_HOST_SEGMENTS

	/* Register usage at this point:
	 *
	 * R1       = host R1
	 * R2       = host R2
	 * R12      = exit handler id
	 * R13      = shadow vcpu (32-bit) or PACA (64-bit)
	 * SVCPU.*  = guest *
	 *
	 */

	/* RFI into the highmem handler */
	mfmsr	r7
	ori	r7, r7, MSR_IR|MSR_DR|MSR_RI|MSR_ME	/* Enable paging */
	mtsrr1	r7
	/* Load highmem handler address */
	PPC_LL	r8, HSTATE_VMHANDLER(r13)
	mtsrr0	r8

	/* Return to the host handler: PC from SRR0, MSR from SRR1. */
	RFI
kvmppc_handler_trampoline_exit_end:
 277