linux/tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * vmx_tsc_adjust_test
 *
 * Copyright (C) 2018, Google LLC.
 *
 * IA32_TSC_ADJUST test
 *
 * According to the SDM, "if an execution of WRMSR to the
 * IA32_TIME_STAMP_COUNTER MSR adds (or subtracts) value X from the TSC,
 * the logical processor also adds (or subtracts) value X from the
 * IA32_TSC_ADJUST MSR."
 *
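 * For example, with illustrative numbers: if the TSC currently reads
 * 100 and IA32_TSC_ADJUST is 0, then WRMSR(IA32_TSC, 60) subtracts 40
 * from the TSC, so the processor also subtracts 40 from
 * IA32_TSC_ADJUST, leaving it at -40.
 *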
 * Note that when L1 doesn't intercept writes to IA32_TSC, a
 * WRMSR(IA32_TSC) from L2 sets L1's TSC value, not L2's perceived TSC
 * value.
 *
 * This test verifies that this unusual case is handled correctly.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#include <string.h>
#include <sys/ioctl.h>

#include "kselftest.h"

#ifndef MSR_IA32_TSC_ADJUST
#define MSR_IA32_TSC_ADJUST 0x3b
#endif

#define PAGE_SIZE       4096
#define VCPU_ID         5

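/*
 * TSC_ADJUST_VALUE (2^32) is the amount each WRMSR(IA32_TSC) below pulls
 * the TSC back; l1_guest_code asserts that the TSC is still below this
 * value when the test starts.  TSC_OFFSET_VALUE (-2^48) is the TSC
 * offset applied to L2, chosen to be large and easily recognizable.
 */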
#define TSC_ADJUST_VALUE (1ll << 32)
#define TSC_OFFSET_VALUE (-(1ll << 48))

enum {
	PORT_ABORT = 0x1000,
	PORT_REPORT,
	PORT_DONE,
};

enum {
	VMXON_PAGE = 0,
	VMCS_PAGE,
	MSR_BITMAP_PAGE,

	NUM_VMX_PAGES,
};

struct kvm_single_msr {
	struct kvm_msrs header;
	struct kvm_msr_entry entry;
} __attribute__((packed));

/* The virtual machine object. */
static struct kvm_vm *vm;

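/*
 * Read IA32_TSC_ADJUST from inside the guest, report the value to the
 * host via GUEST_SYNC, and assert that it is no larger than @max.
 */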
static void check_ia32_tsc_adjust(int64_t max)
{
	int64_t adjust;

	adjust = rdmsr(MSR_IA32_TSC_ADJUST);
	GUEST_SYNC(adjust);
	GUEST_ASSERT(adjust <= max);
}

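/*
 * L2 runs with TSC offsetting enabled, so its view of the TSC is L1's
 * TSC plus TSC_OFFSET_VALUE; subtracting the offset from rdtsc()
 * recovers L1's TSC.  Because L1 does not intercept the write, the
 * WRMSR below changes L1's TSC, which drops L1's IA32_TSC_ADJUST by
 * another TSC_ADJUST_VALUE.
 */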
static void l2_guest_code(void)
{
	uint64_t l1_tsc = rdtsc() - TSC_OFFSET_VALUE;

	wrmsr(MSR_IA32_TSC, l1_tsc - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	/* Exit to L1 */
	__asm__ __volatile__("vmcall");
}

static void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	uint32_t control;
	uintptr_t save_cr3;

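	/*
	 * Pull the TSC back by TSC_ADJUST_VALUE; per the SDM rule quoted
	 * above, IA32_TSC_ADJUST must drop to at most -TSC_ADJUST_VALUE.
	 */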
	GUEST_ASSERT(rdtsc() < TSC_ADJUST_VALUE);
	wrmsr(MSR_IA32_TSC, rdtsc() - TSC_ADJUST_VALUE);
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);

	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));

	/* Prepare the VMCS for L2 execution. */
	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);
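	/*
	 * Use MSR bitmaps so that L2's WRMSR(IA32_TSC) is not intercepted
	 * (the bitmap page starts out zeroed), and enable TSC offsetting
	 * so that L2 observes TSC + TSC_OFFSET_VALUE.
	 */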
	control = vmreadz(CPU_BASED_VM_EXEC_CONTROL);
	control |= CPU_BASED_USE_MSR_BITMAPS | CPU_BASED_USE_TSC_OFFSETTING;
	vmwrite(CPU_BASED_VM_EXEC_CONTROL, control);
	vmwrite(TSC_OFFSET, TSC_OFFSET_VALUE);

	/* Jump into L2.  First, test failure to load guest CR3.  */
	save_cr3 = vmreadz(GUEST_CR3);
	vmwrite(GUEST_CR3, -1ull);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) ==
		     (EXIT_REASON_FAILED_VMENTRY | EXIT_REASON_INVALID_STATE));
	check_ia32_tsc_adjust(-1 * TSC_ADJUST_VALUE);
	vmwrite(GUEST_CR3, save_cr3);

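	/* Now launch L2 for real; it exits back to L1 via VMCALL. */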
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	check_ia32_tsc_adjust(-2 * TSC_ADJUST_VALUE);

	GUEST_DONE();
}

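/* Print an IA32_TSC_ADJUST value reported by the guest via GUEST_SYNC. */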
static void report(int64_t val)
{
	pr_info("IA32_TSC_ADJUST is %ld (%lld * TSC_ADJUST_VALUE + %lld).\n",
		val, val / TSC_ADJUST_VALUE, val % TSC_ADJUST_VALUE);
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva;

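	/* Skip (rather than fail) when nested VMX is unavailable. */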
	nested_vmx_check_supported();

	vm = vm_create_default(VCPU_ID, 0, (void *) l1_guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

	/* Allocate VMX pages and shared descriptors (vmx_pages). */
	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

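	/*
	 * Run the vCPU until the guest signals completion, printing each
	 * IA32_TSC_ADJUST value it reports along the way.
	 */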
	for (;;) {
		volatile struct kvm_run *run = vcpu_state(vm, VCPU_ID);
		struct ucall uc;

		vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Got exit_reason other than KVM_EXIT_IO: %u (%s)\n",
			    run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s", (const char *)uc.args[0]);
			/* NOT REACHED */
		case UCALL_SYNC:
			report(uc.args[1]);
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}
	}

done:
	kvm_vm_free(vm);
	return 0;
}
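
/*
 * Typical build-and-run steps from the top of a kernel tree (a sketch;
 * exact targets may vary by kernel version):
 *
 *   $ make -C tools/testing/selftests TARGETS=kvm
 *   $ ./tools/testing/selftests/kvm/x86_64/vmx_tsc_adjust_test
 */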