qemu/target/i386/hvf/vmx.h
/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */

#ifndef VMX_H
#define VMX_H

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"

#include "exec/address-spaces.h"

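/* read GPR */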
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}

/* write GPR */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}

/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}

/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}

/* desired control word constrained by hardware/hypervisor capabilities */
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}
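
/*
 * Illustrative sketch (an assumption, not code from this file): a VMCS
 * control field is typically initialised by constraining the desired bits
 * with the capability value reported by Hypervisor.framework, e.g.
 *
 *     uint64_t cap;
 *     hv_vmx_read_capability(HV_VMX_CAP_PINBASED, &cap);
 *     wvmcs(vcpu, VMCS_PIN_BASED_CTLS, cap2ctrl(cap, desired_pin_ctls));
 *
 * where 'desired_pin_ctls' is a hypothetical variable holding the pin-based
 * controls the caller wants enabled.
 */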
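/* "IA-32e mode guest" VM-entry control (bit 9 of the VM-entry controls) */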
#define VM_ENTRY_GUEST_LMA (1LL << 9)

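/* Type field (bits 3:0) of the segment access-rights VMCS fields */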
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2

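/*
 * Put the guest into IA-32e (long) mode: set EFER.LMA, set the
 * "IA-32e mode guest" VM-entry control, and make sure TR has the
 * 64-bit busy TSS type that long mode requires.
 */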
static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}

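/*
 * Leave IA-32e mode: clear the "IA-32e mode guest" VM-entry control
 * and EFER.LMA in the guest EFER.
 */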
static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}

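/*
 * Update the guest CR0. Loads the PDPTE registers when PAE paging is
 * enabled outside of long mode, keeps the CR0 mask/shadow in sync,
 * handles transitions into and out of long mode when EFER.LME is set,
 * and finally flushes the vCPU's TLB and cached state.
 */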
static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
    uint64_t changed_cr0 = old_cr0 ^ cr0;
    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
    uint64_t entry_ctls;

    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
        address_space_read(&address_space_memory,
                           rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                           MEMTXATTRS_UNSPECIFIED, pdpte, 32);
        /* Only set PDPTE when appropriate. */
        for (i = 0; i < 4; i++) {
            wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
        }
    }

    wvmcs(vcpu, VMCS_CR0_MASK, mask);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    if (efer & MSR_EFER_LME) {
        if (changed_cr0 & CR0_PG) {
            if (cr0 & CR0_PG) {
                enter_long_mode(vcpu, cr0, efer);
            } else {
                exit_long_mode(vcpu, cr0, efer);
            }
        }
    } else {
        entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
        wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
    }

    /* Filter new CR0 after we are finished examining it above. */
    cr0 = (cr0 & ~(mask & ~CR0_PG));
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

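/*
 * Update the guest CR4. CR4.VMXE is forced on in the real guest CR4
 * while the CR4 mask/shadow make the guest read back the value it
 * actually wrote.
 */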
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    uint64_t guest_cr4 = cr4 | CR4_VMXE;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
    wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

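/*
 * Move the guest RIP and mirror it into env->eip, then drop any
 * STI/MOV-SS interrupt-shadow blocking that would otherwise stay
 * pending after the instruction pointer has moved on.
 */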
static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint64_t val;

    /* BUG: should take overlap into account */
    wreg(cpu->hvf_fd, HV_X86_RIP, rip);
    env->eip = rip;

    /* after moving forward in rip, we need to clear the interruptibility state */
    val = rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}

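/* Clear NMI blocking in the guest interruptibility state and in hflags2. */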
static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

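/* Set NMI blocking in the guest interruptibility state and in hflags2. */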
static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf_fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

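/* Request a VM exit as soon as the guest can accept an NMI (NMI-window exiting). */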
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

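/* Stop requesting NMI-window VM exits. */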
static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf_fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

#endif