qemu/target/i386/hvf/vmx.h
/*
 * Copyright (C) 2016 Veertu Inc,
 * Copyright (C) 2017 Google Inc,
 * Based on Veertu vddh/vmm/vmx.h
 *
 * Interfaces to Hypervisor.framework to read/write X86 registers and VMCS.
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with this program; if not, see <http://www.gnu.org/licenses/>.
 *
 * This file contains code under public domain from the hvdos project:
 * https://github.com/mist64/hvdos
 */

#ifndef VMX_H
#define VMX_H

#include <Hypervisor/hv.h>
#include <Hypervisor/hv_vmx.h>
#include "vmcs.h"
#include "cpu.h"
#include "x86.h"
#include "sysemu/hvf.h"
#include "sysemu/hvf_int.h"

#include "exec/address-spaces.h"

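/* read GPR */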
static inline uint64_t rreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg)
{
    uint64_t v;

    if (hv_vcpu_read_register(vcpu, reg, &v)) {
        abort();
    }

    return v;
}

/* write GPR */
static inline void wreg(hv_vcpuid_t vcpu, hv_x86_reg_t reg, uint64_t v)
{
    if (hv_vcpu_write_register(vcpu, reg, v)) {
        abort();
    }
}

/* read VMCS field */
static inline uint64_t rvmcs(hv_vcpuid_t vcpu, uint32_t field)
{
    uint64_t v;

    hv_vmx_vcpu_read_vmcs(vcpu, field, &v);

    return v;
}

/* write VMCS field */
static inline void wvmcs(hv_vcpuid_t vcpu, uint32_t field, uint64_t v)
{
    hv_vmx_vcpu_write_vmcs(vcpu, field, v);
}

/* desired control word constrained by hardware/hypervisor capabilities */
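/*
 * The VMX capability MSRs report the allowed 0-settings in bits 31:0 (a bit
 * set there must be 1 in the control) and the allowed 1-settings in bits 63:32.
 */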
static inline uint64_t cap2ctrl(uint64_t cap, uint64_t ctrl)
{
    return (ctrl | (cap & 0xffffffff)) & (cap >> 32);
}

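/* VM-entry controls, bit 9: "IA-32e mode guest" */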
#define VM_ENTRY_GUEST_LMA (1LL << 9)

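/* Values/masks for the "type" field (bits 3:0) of segment access rights */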
#define AR_TYPE_ACCESSES_MASK 1
#define AR_TYPE_READABLE_MASK (1 << 1)
#define AR_TYPE_WRITEABLE_MASK (1 << 2)
#define AR_TYPE_CODE_MASK (1 << 3)
#define AR_TYPE_MASK 0x0f
#define AR_TYPE_BUSY_64_TSS 11
#define AR_TYPE_BUSY_32_TSS 11
#define AR_TYPE_BUSY_16_TSS 3
#define AR_TYPE_LDT 2

static void enter_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    efer |= MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls | VM_ENTRY_GUEST_LMA);

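    /*
     * With "IA-32e mode guest" set, VM entry requires TR to be a busy
     * 64-bit TSS, so fix up the type field if necessary.
     */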
    uint64_t guest_tr_ar = rvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS);
    if ((efer & MSR_EFER_LME) &&
        (guest_tr_ar & AR_TYPE_MASK) != AR_TYPE_BUSY_64_TSS) {
        wvmcs(vcpu, VMCS_GUEST_TR_ACCESS_RIGHTS,
              (guest_tr_ar & ~AR_TYPE_MASK) | AR_TYPE_BUSY_64_TSS);
    }
}

static void exit_long_mode(hv_vcpuid_t vcpu, uint64_t cr0, uint64_t efer)
{
    uint64_t entry_ctls;

    entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
    wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);

    efer &= ~MSR_EFER_LMA;
    wvmcs(vcpu, VMCS_GUEST_IA32_EFER, efer);
}

static inline void macvm_set_cr0(hv_vcpuid_t vcpu, uint64_t cr0)
{
    int i;
    uint64_t pdpte[4] = {0, 0, 0, 0};
    uint64_t efer = rvmcs(vcpu, VMCS_GUEST_IA32_EFER);
    uint64_t old_cr0 = rvmcs(vcpu, VMCS_GUEST_CR0);
    uint64_t changed_cr0 = old_cr0 ^ cr0;
    uint64_t mask = CR0_PG | CR0_CD | CR0_NW | CR0_NE | CR0_ET;
    uint64_t entry_ctls;

    if ((cr0 & CR0_PG) && (rvmcs(vcpu, VMCS_GUEST_CR4) & CR4_PAE) &&
        !(efer & MSR_EFER_LME)) {
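        /* Entering PAE paging outside long mode: load the PDPTEs from CR3. */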
        address_space_read(&address_space_memory,
                           rvmcs(vcpu, VMCS_GUEST_CR3) & ~0x1f,
                           MEMTXATTRS_UNSPECIFIED, pdpte, 32);
        /* Only set PDPTE when appropriate. */
        for (i = 0; i < 4; i++) {
            wvmcs(vcpu, VMCS_GUEST_PDPTE0 + i * 2, pdpte[i]);
        }
    }

    wvmcs(vcpu, VMCS_CR0_MASK, mask);
    wvmcs(vcpu, VMCS_CR0_SHADOW, cr0);

    if (efer & MSR_EFER_LME) {
        if (changed_cr0 & CR0_PG) {
            if (cr0 & CR0_PG) {
                enter_long_mode(vcpu, cr0, efer);
            } else {
                exit_long_mode(vcpu, cr0, efer);
            }
        }
    } else {
        entry_ctls = rvmcs(vcpu, VMCS_ENTRY_CTLS);
        wvmcs(vcpu, VMCS_ENTRY_CTLS, entry_ctls & ~VM_ENTRY_GUEST_LMA);
    }

    /* Filter new CR0 after we are finished examining it above. */
    cr0 = (cr0 & ~(mask & ~CR0_PG));
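    /* CR0.NE is a VMX fixed-1 bit and CR0.ET always reads as 1, so keep both set. */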
    wvmcs(vcpu, VMCS_GUEST_CR0, cr0 | CR0_NE | CR0_ET);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

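/*
 * CR4.VMXE is a VMX fixed-1 bit, so keep it set in the real guest CR4;
 * the CR4 mask/shadow below hides it from the guest.
 */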
static inline void macvm_set_cr4(hv_vcpuid_t vcpu, uint64_t cr4)
{
    uint64_t guest_cr4 = cr4 | CR4_VMXE;

    wvmcs(vcpu, VMCS_GUEST_CR4, guest_cr4);
    wvmcs(vcpu, VMCS_CR4_SHADOW, cr4);
    wvmcs(vcpu, VMCS_CR4_MASK, CR4_VMXE);

    hv_vcpu_invalidate_tlb(vcpu);
    hv_vcpu_flush(vcpu);
}

static inline void macvm_set_rip(CPUState *cpu, uint64_t rip)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;
    uint64_t val;

    /* BUG: should take segment overlap into account. */
    wreg(cpu->hvf->fd, HV_X86_RIP, rip);
    env->eip = rip;

    /* After moving RIP forward, clear any STI/MOV-SS interruptibility blocking. */
    val = rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
    if (val & (VMCS_INTERRUPTIBILITY_STI_BLOCKING |
               VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING)) {
        env->hflags &= ~HF_INHIBIT_IRQ_MASK;
        wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY,
              val & ~(VMCS_INTERRUPTIBILITY_STI_BLOCKING |
                      VMCS_INTERRUPTIBILITY_MOVSS_BLOCKING));
    }
}

static inline void vmx_clear_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 &= ~HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi &= ~VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

static inline void vmx_set_nmi_blocking(CPUState *cpu)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    env->hflags2 |= HF2_NMI_MASK;
    uint32_t gi = (uint32_t)rvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY);
    gi |= VMCS_INTERRUPTIBILITY_NMI_BLOCKING;
    wvmcs(cpu->hvf->fd, VMCS_GUEST_INTERRUPTIBILITY, gi);
}

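/*
 * "NMI-window exiting" forces a VM exit as soon as NMI delivery to the guest
 * becomes possible, so a pending NMI can be injected at that point.
 */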
static inline void vmx_set_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val |
          VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

static inline void vmx_clear_nmi_window_exiting(CPUState *cpu)
{
    uint64_t val;
    val = rvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS);
    wvmcs(cpu->hvf->fd, VMCS_PRI_PROC_BASED_CTLS, val &
          ~VMCS_PRI_PROC_BASED_CTLS_NMI_WINDOW_EXITING);
}

#endif