linux/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
/*
 * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR."
 *
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */
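/*
 * Concretely: if the TSC currently reads T and the guest executes
 * WRMSR(IA32_TSC, T - X), the processor also subtracts X from
 * IA32_TSC_ADJUST.  Both guest functions below rely on this: each one
 * writes back "current TSC minus TSC_ADJUST_VALUE" and then checks how
 * far IA32_TSC_ADJUST has fallen.
 */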

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE	4096
#define VCPU_ID		5

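/*
 * Each guest write moves the TSC back by TSC_ADJUST_VALUE (2^32), so
 * IA32_TSC_ADJUST is expected to drop by the same amount each time.
 * TSC_OFFSET_VALUE (-2^48) is the TSC offset L1 programs for L2, putting
 * L2's view of the TSC far behind L1's.
 */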
#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE -(1ll << 48)

enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

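/*
 * IA32_TSC_ADJUST never lands exactly on the target: the TSC keeps
 * counting between the rdtsc() that samples it and the wrmsr() that
 * rewrites it, which pushes the adjust value a little further below
 * zero.  Hence the "<= max" check instead of an equality.
 */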
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	GUEST_SYNC(adjust);
	GUEST_ASSERT(adjust <= max);
}

static void l2_guest_code(void)
{
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

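	/*
	 * L2's rdtsc() returns L1's TSC plus TSC_OFFSET_VALUE, so
	 * subtracting the offset above recovers L1's TSC.  L1 does not
	 * intercept WRMSR(IA32_TSC), so this write lands on L1's TSC
	 * and drags L1's IA32_TSC_ADJUST down by another
	 * TSC_ADJUST_VALUE.
	 */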
	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
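	/*
	 * Enable MSR bitmaps and TSC offsetting, and give L2 a large
	 * negative TSC offset.  Nothing sets any bit in the bitmap page
	 * (the library is expected to hand it out zeroed), so no MSR
	 * access is intercepted, including L2's WRMSR to IA32_TSC.
	 */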
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/*
	 * Jump into L2.  First, test failure to load guest CR3: a CR3
	 * of -1ull has reserved bits set, so the VM entry fails while
	 * loading guest state.  The aborted entry must leave
	 * IA32_TSC_ADJUST untouched.
	 */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

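	/*
	 * With CR3 restored, launch L2 for real; it returns via VMCALL
	 * after moving L1's TSC back by another TSC_ADJUST_VALUE.
	 */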
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}

static void report(int64_t val)
{
	printf("IA32_TSC_ADJUST is %lld (%lld * TSC_ADJUST_VALUE + %lld).\n",
	       (long long)val, (long long)(val / TSC_ADJUST_VALUE),
	       (long long)(val % TSC_ADJUST_VALUE));
}

int main(int argc, char *argv[])
{
	struct vmx_pages *vmx_pages;
	vm_vaddr_t vmx_pages_gva;
	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

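	/* CPUID.1:ECX bit 5 advertises VMX; skip if nested VMX is unavailable. */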
	if (!(entry->ecx & CPUID_VMX)) {
		fprintf(stderr, "nested VMX not enabled, skipping test\n");
		exit(KSFT_SKIP);
	}

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

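	/*
	 * Run the guest until it signals completion.  Every GUEST_SYNC
	 * from check_ia32_tsc_adjust() surfaces here as UCALL_SYNC with
	 * the current IA32_TSC_ADJUST value in args[1]; GUEST_DONE()
	 * arrives as UCALL_DONE and ends the loop.
	 */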
	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s", (const char *)uc.args[0]);
			/* NOT REACHED */
		case UCALL_SYNC:
			report(uc.args[1]);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%lx.", uc.cmd);
		}
	}
	}

done:
	kvm_vm_free(vm);
	return 0;
}