// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/svm.c
 * Helpers used for nested SVM testing
 * Largely inspired by the KVM unit test svm.c
 *
 * Copyright (C) 2020, Red Hat, Inc.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"
#include "svm_util.h"

struct gpr64_regs guest_regs;
u64 rflags;

/* Allocate memory regions for nested SVM tests.
 *
 * Input Args:
 *   vm - The VM to allocate guest-virtual addresses in.
 *
 * Output Args:
 *   p_svm_gva - The guest virtual address for the struct svm_test_data.
 *
 * Return:
 *   Host pointer to the structure with the addresses of the SVM areas.
 */
struct svm_test_data *
vcpu_alloc_svm(struct kvm_vm *vm, vm_vaddr_t *p_svm_gva)
{
	vm_vaddr_t svm_gva = vm_vaddr_alloc_page(vm);
	struct svm_test_data *svm = addr_gva2hva(vm, svm_gva);

	svm->vmcb = (void *)vm_vaddr_alloc_page(vm);
	svm->vmcb_hva = addr_gva2hva(vm, (uintptr_t)svm->vmcb);
	svm->vmcb_gpa = addr_gva2gpa(vm, (uintptr_t)svm->vmcb);

	svm->save_area = (void *)vm_vaddr_alloc_page(vm);
	svm->save_area_hva = addr_gva2hva(vm, (uintptr_t)svm->save_area);
	svm->save_area_gpa = addr_gva2gpa(vm, (uintptr_t)svm->save_area);

	*p_svm_gva = svm_gva;
	return svm;
}
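
/*
 * Illustrative sketch, not part of this library: on the host side a test
 * typically allocates the SVM pages before running the vCPU and hands the
 * guest-virtual address to the guest as its argument.  VCPU_ID and
 * l1_guest_code are hypothetical names from such a test.
 *
 *	vm_vaddr_t svm_gva;
 *	struct kvm_vm *vm;
 *
 *	nested_svm_check_supported();
 *	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
 *	vcpu_alloc_svm(vm, &svm_gva);
 *	vcpu_args_set(vm, VCPU_ID, 1, svm_gva);
 */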

static void vmcb_set_seg(struct vmcb_seg *seg, u16 selector,
			 u64 base, u32 limit, u32 attr)
{
	seg->selector = selector;
	seg->attrib = attr;
	seg->limit = limit;
	seg->base = base;
}

void generic_svm_setup(struct svm_test_data *svm, void *guest_rip, void *guest_rsp)
{
	struct vmcb *vmcb = svm->vmcb;
	uint64_t vmcb_gpa = svm->vmcb_gpa;
	struct vmcb_save_area *save = &vmcb->save;
	struct vmcb_control_area *ctrl = &vmcb->control;
	u32 data_seg_attr = 3 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_DB_MASK | SVM_SELECTOR_G_MASK;
	u32 code_seg_attr = 9 | SVM_SELECTOR_S_MASK | SVM_SELECTOR_P_MASK
		| SVM_SELECTOR_L_MASK | SVM_SELECTOR_G_MASK;
	uint64_t efer;

	efer = rdmsr(MSR_EFER);
	wrmsr(MSR_EFER, efer | EFER_SVME);
	wrmsr(MSR_VM_HSAVE_PA, svm->save_area_gpa);

	memset(vmcb, 0, sizeof(*vmcb));
	asm volatile ("vmsave %0\n\t" : : "a" (vmcb_gpa) : "memory");
	vmcb_set_seg(&save->es, get_es(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->cs, get_cs(), 0, -1U, code_seg_attr);
	vmcb_set_seg(&save->ss, get_ss(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->ds, get_ds(), 0, -1U, data_seg_attr);
	vmcb_set_seg(&save->gdtr, 0, get_gdt().address, get_gdt().size, 0);
	vmcb_set_seg(&save->idtr, 0, get_idt().address, get_idt().size, 0);

	ctrl->asid = 1;
	save->cpl = 0;
	save->efer = rdmsr(MSR_EFER);
	asm volatile ("mov %%cr4, %0" : "=r"(save->cr4) : : "memory");
	asm volatile ("mov %%cr3, %0" : "=r"(save->cr3) : : "memory");
	asm volatile ("mov %%cr0, %0" : "=r"(save->cr0) : : "memory");
	asm volatile ("mov %%dr7, %0" : "=r"(save->dr7) : : "memory");
	asm volatile ("mov %%dr6, %0" : "=r"(save->dr6) : : "memory");
	asm volatile ("mov %%cr2, %0" : "=r"(save->cr2) : : "memory");
	save->g_pat = rdmsr(MSR_IA32_CR_PAT);
	save->dbgctl = rdmsr(MSR_IA32_DEBUGCTLMSR);
	ctrl->intercept = (1ULL << INTERCEPT_VMRUN) |
			  (1ULL << INTERCEPT_VMMCALL);

	vmcb->save.rip = (u64)guest_rip;
	vmcb->save.rsp = (u64)guest_rsp;
	/* LOAD_GPR_C loads this into RDI, so L2 starts with the
	 * svm_test_data pointer as its first argument. */
	guest_regs.rdi = (u64)svm;
}
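
/*
 * Illustrative sketch, not part of this library: an L1 guest usually carves
 * an L2 stack out of its own stack and points generic_svm_setup() at the
 * code L2 should run.  l2_guest_code and L2_GUEST_STACK_SIZE are
 * hypothetical names from such a test.
 *
 *	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
 *
 *	generic_svm_setup(svm, l2_guest_code,
 *			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);
 *
 * The RSP handed in points one element past the end of the array, since the
 * L2 stack grows downwards from there.
 */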

/*
 * Save/restore 64-bit general purpose registers, except rax, rip and rsp,
 * which are handled directly through the VMCB guest processor state.
 */
#define SAVE_GPR_C				\
	"xchg %%rbx, guest_regs+0x20\n\t"	\
	"xchg %%rcx, guest_regs+0x10\n\t"	\
	"xchg %%rdx, guest_regs+0x18\n\t"	\
	"xchg %%rbp, guest_regs+0x30\n\t"	\
	"xchg %%rsi, guest_regs+0x38\n\t"	\
	"xchg %%rdi, guest_regs+0x40\n\t"	\
	"xchg %%r8,  guest_regs+0x48\n\t"	\
	"xchg %%r9,  guest_regs+0x50\n\t"	\
	"xchg %%r10, guest_regs+0x58\n\t"	\
	"xchg %%r11, guest_regs+0x60\n\t"	\
	"xchg %%r12, guest_regs+0x68\n\t"	\
	"xchg %%r13, guest_regs+0x70\n\t"	\
	"xchg %%r14, guest_regs+0x78\n\t"	\
	"xchg %%r15, guest_regs+0x80\n\t"

#define LOAD_GPR_C	SAVE_GPR_C

/*
 * Selftests do not use interrupts, so clgi/sti/cli/stgi are dropped for now.
 * Since LOAD_GPR_C and SAVE_GPR_C are the same xchg sequence, the registers
 * they touch end up unmodified after the asm block and therefore do not need
 * to be in the clobber list.
 */
void run_guest(struct vmcb *vmcb, uint64_t vmcb_gpa)
{
	asm volatile (
		"vmload %[vmcb_gpa]\n\t"
		"mov rflags, %%r15\n\t"		// rflags
		"mov %%r15, 0x170(%[vmcb])\n\t"
		"mov guest_regs, %%r15\n\t"	// rax
		"mov %%r15, 0x1f8(%[vmcb])\n\t"
		LOAD_GPR_C
		"vmrun %[vmcb_gpa]\n\t"
		SAVE_GPR_C
		"mov 0x170(%[vmcb]), %%r15\n\t"	// rflags
		"mov %%r15, rflags\n\t"
		"mov 0x1f8(%[vmcb]), %%r15\n\t"	// rax
		"mov %%r15, guest_regs\n\t"
		"vmsave %[vmcb_gpa]\n\t"
		: : [vmcb] "r" (vmcb), [vmcb_gpa] "a" (vmcb_gpa)
		: "r15", "memory");
}
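
/*
 * Illustrative sketch, not part of this library: after generic_svm_setup()
 * the L1 guest enters L2 with run_guest() and then inspects the VMCB exit
 * code, e.g. for an L2 that exits via vmmcall:
 *
 *	run_guest(svm->vmcb, svm->vmcb_gpa);
 *	GUEST_ASSERT(svm->vmcb->control.exit_code == SVM_EXIT_VMMCALL);
 *
 * To resume L2 after the intercepted vmmcall, advance svm->vmcb->save.rip
 * past the 3-byte instruction before calling run_guest() again.
 */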

bool nested_svm_supported(void)
{
	struct kvm_cpuid_entry2 *entry =
		kvm_get_supported_cpuid_entry(0x80000001);

	return entry->ecx & CPUID_SVM;
}

void nested_svm_check_supported(void)
{
	if (!nested_svm_supported()) {
		print_skip("nested SVM not enabled");
		exit(KSFT_SKIP);
	}
}