linux/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMX-preemption timer test
 *
 * Copyright (C) 2020, Google, LLC.
 *
 * Test to ensure that VM-Entry after migration doesn't incorrectly
 * restart the timer with the full timer value instead of the partially
 * decayed timer value.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID         5
#define PREEMPTION_TIMER_VALUE                  100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1        80000000ull
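
/*
 * The timer is armed with PREEMPTION_TIMER_VALUE ticks, and L2 spins
 * until at least PREEMPTION_TIMER_VALUE_THRESHOLD1 ticks have elapsed
 * before forcing a save/restore, so the timer is guaranteed to be
 * partially decayed when the vCPU state is migrated.
 */
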
u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;

union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;

void l2_guest_code(void)
{
        u64 vmx_pt_delta;

        vmcall();
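
        /*
         * Round the TSC down to a whole preemption-timer tick: the timer
         * counts in units of 2^vmx_pt_rate TSC cycles, so only bits
         * [63:vmx_pt_rate] of the TSC are relevant to the deadline math.
         */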
        l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

        /*
         * Wait until the 1st threshold has passed
         */
        do {
                l2_vmx_pt_finish = rdtsc();
                vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
                                vmx_pt_rate;
        } while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);

        /*
         * Force L2 through a save and restore cycle
         */
        GUEST_SYNC(1);
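
        /*
         * On this sync the host saves the vCPU state, destroys the VM and
         * restores the state into a new VM (see the stage loop in main()).
         */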

        l2_save_restore_done = 1;

        /*
         * Now wait for the preemption timer to fire and
         * exit to L1
         */
        while ((l2_vmx_pt_finish = rdtsc()))
                ;
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
        u64 l1_vmx_pt_start;
        u64 l1_vmx_pt_finish;
        u64 l1_tsc_deadline, l2_tsc_deadline;

        GUEST_ASSERT(vmx_pages->vmcs_gpa);
        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
        GUEST_ASSERT(load_vmcs(vmx_pages));
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

        prepare_vmcs(vmx_pages, l2_guest_code,
                     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

        /*
         * Check for Preemption timer support
         */
        basic.val = rdmsr(MSR_IA32_VMX_BASIC);
        ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
                        : MSR_IA32_VMX_PINBASED_CTLS);
        ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
                        : MSR_IA32_VMX_EXIT_CTLS);
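
        /*
         * IA32_VMX_BASIC bit 55 (basic.ctrl) indicates that the TRUE_xxx
         * control MSRs exist; the upper half (.clr) of each control MSR
         * reports which controls may be set to 1.
         */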

        if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
            !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
                return;

        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
        vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));

        /*
         * Turn on PIN control and resume the guest
         */
        GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
                              vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
                              PIN_BASED_VMX_PREEMPTION_TIMER));

        GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
                              PREEMPTION_TIMER_VALUE));

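        /*
         * Bits 4:0 of IA32_VMX_MISC give the rate X at which the preemption
         * timer counts down: one tick every time bit X of the TSC changes.
         */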
        vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

        l2_save_restore_done = 0;

        l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

        GUEST_ASSERT(!vmresume());

        l1_vmx_pt_finish = rdtsc();

        /*
         * Ensure the exit from L2 happens after L2 has gone through
         * save and restore
         */
        GUEST_ASSERT(l2_save_restore_done);

        /*
         * Ensure the exit from L2 is due to preemption timer expiry
         */
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);

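        /* Deadline in TSC cycles: start + PREEMPTION_TIMER_VALUE * 2^rate. */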
        l1_tsc_deadline = l1_vmx_pt_start +
                (PREEMPTION_TIMER_VALUE << vmx_pt_rate);

        l2_tsc_deadline = l2_vmx_pt_start +
                (PREEMPTION_TIMER_VALUE << vmx_pt_rate);

        /*
         * Sync with the host; pass the L1/L2 timer expiry times and TSC
         * deadlines so that the host can verify they are as expected.
         */
        GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
                l2_vmx_pt_finish, l2_tsc_deadline);
}

void guest_code(struct vmx_pages *vmx_pages)
{
        if (vmx_pages)
                l1_guest_code(vmx_pages);

        GUEST_DONE();
}

int main(int argc, char *argv[])
{
        vm_vaddr_t vmx_pages_gva = 0;

        struct kvm_regs regs1, regs2;
        struct kvm_vm *vm;
        struct kvm_run *run;
        struct kvm_x86_state *state;
        struct ucall uc;
        int stage;

        /*
         * AMD currently does not implement any VMX features, so for now we
         * just early out.
         */
        nested_vmx_check_supported();

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);

        vcpu_regs_get(vm, VCPU_ID, &regs1);

        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
                vcpu_alloc_vmx(vm, &vmx_pages_gva);
                vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
        } else {
                pr_info("will skip vmx preemption timer checks\n");
                goto done;
        }

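        /*
         * Stage 1: L2 requests a save/restore cycle via GUEST_SYNC(1).
         * Stage 2: L1 reports the expiry times and deadlines via
         * GUEST_SYNC_ARGS. Each iteration ends by migrating the vCPU
         * state into a new VM.
         */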
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                            "Stage %d: unexpected exit reason: %u (%s)\n",
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));

                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
                                  __FILE__, uc.args[1]);
                        /* NOT REACHED */
                case UCALL_SYNC:
                        break;
                case UCALL_DONE:
                        goto done;
                default:
                        TEST_FAIL("Unknown ucall %lu", uc.cmd);
                }

                /* UCALL_SYNC is handled here.  */
                TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
                            uc.args[1] == stage,
                            "Stage %d: Unexpected ucall sync args, got %lx",
                            stage, (ulong)uc.args[1]);

                /*
                 * If this is stage 2, verify that the VMX preemption timer
                 * expiry is as expected: from L1's perspective, the timer
                 * must not have expired too early; from L2's perspective,
                 * it must not have expired too late.
                 */
                if (stage == 2) {
                        pr_info("Stage %d: L1 PT expiry TSC (%lu), L1 TSC deadline (%lu)\n",
                                stage, uc.args[2], uc.args[3]);

                        pr_info("Stage %d: L2 PT expiry TSC (%lu), L2 TSC deadline (%lu)\n",
                                stage, uc.args[4], uc.args[5]);

                        TEST_ASSERT(uc.args[2] >= uc.args[3],
                                "Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
                                stage, uc.args[2], uc.args[3]);

                        TEST_ASSERT(uc.args[4] < uc.args[5],
                                "Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
                                stage, uc.args[4], uc.args[5]);
                }

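                /*
                 * Emulate a migration: snapshot the full vCPU state,
                 * including nested VMX state, and load it into a freshly
                 * created VM. The partially decayed preemption timer value
                 * must survive this round trip.
                 */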
                state = vcpu_save_state(vm, VCPU_ID);
                memset(&regs1, 0, sizeof(regs1));
                vcpu_regs_get(vm, VCPU_ID, &regs1);

                kvm_vm_release(vm);

                /* Restore state in a new VM.  */
                kvm_vm_restart(vm, O_RDWR);
                vm_vcpu_add(vm, VCPU_ID);
                vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
                vcpu_load_state(vm, VCPU_ID, state);
                run = vcpu_state(vm, VCPU_ID);
                free(state);

                memset(&regs2, 0, sizeof(regs2));
                vcpu_regs_get(vm, VCPU_ID, &regs2);
                TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
                            "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
                            (ulong) regs2.rdi, (ulong) regs2.rsi);
        }

done:
        kvm_vm_free(vm);
}