linux/tools/testing/selftests/kvm/x86_64/vmx_preemption_timer_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * VMX-preemption timer test
 *
 * Copyright (C) 2020, Google, LLC.
 *
 * Test to ensure that VM-Entry after migration doesn't
 * incorrectly restart the timer with the full timer
 * value instead of the partially decayed timer value.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID         5
#define PREEMPTION_TIMER_VALUE                  100000000ull
#define PREEMPTION_TIMER_VALUE_THRESHOLD1        80000000ull
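
/*
 * The timer is armed in L1 with PREEMPTION_TIMER_VALUE (in preemption-timer
 * units, i.e. TSC >> vmx_pt_rate).  L2 busy-waits until at least
 * PREEMPTION_TIMER_VALUE_THRESHOLD1 units have elapsed before requesting a
 * save/restore cycle, so no more than roughly 20M units should remain when
 * the "migration" happens; the timer must not be rearmed with the full value.
 */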

u32 vmx_pt_rate;
bool l2_save_restore_done;
static u64 l2_vmx_pt_start;
volatile u64 l2_vmx_pt_finish;

union vmx_basic basic;
union vmx_ctrl_msr ctrl_pin_rev;
union vmx_ctrl_msr ctrl_exit_rev;

void l2_guest_code(void)
{
	u64 vmx_pt_delta;

	vmcall();
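	/*
	 * Clear the low vmx_pt_rate bits of the TSC so the start value is
	 * aligned to the preemption timer's granularity (the timer counts
	 * at TSC >> vmx_pt_rate).
	 */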
	l2_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	/*
	 * Wait until the 1st threshold has passed
	 */
	do {
		l2_vmx_pt_finish = rdtsc();
		vmx_pt_delta = (l2_vmx_pt_finish - l2_vmx_pt_start) >>
				vmx_pt_rate;
	} while (vmx_pt_delta < PREEMPTION_TIMER_VALUE_THRESHOLD1);

	/*
	 * Force L2 through Save and Restore cycle
	 */
	GUEST_SYNC(1);

	l2_save_restore_done = 1;

	/*
	 * Now wait for the preemption timer to fire and
	 * exit to L1
	 */
	while ((l2_vmx_pt_finish = rdtsc()))
		;
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	u64 l1_vmx_pt_start;
	u64 l1_vmx_pt_finish;
	u64 l1_tsc_deadline, l2_tsc_deadline;

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	/*
	 * Check for Preemption timer support
	 */
	basic.val = rdmsr(MSR_IA32_VMX_BASIC);
	ctrl_pin_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_PINBASED_CTLS
			: MSR_IA32_VMX_PINBASED_CTLS);
	ctrl_exit_rev.val = rdmsr(basic.ctrl ? MSR_IA32_VMX_TRUE_EXIT_CTLS
			: MSR_IA32_VMX_EXIT_CTLS);

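	/*
	 * Bail if either control is missing: PIN_BASED_VMX_PREEMPTION_TIMER
	 * arms the timer, and VM_EXIT_SAVE_VMX_PREEMPTION_TIMER makes VM-exit
	 * save the partially decayed value, which is what this test relies on
	 * surviving the save/restore cycle.
	 */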
	if (!(ctrl_pin_rev.clr & PIN_BASED_VMX_PREEMPTION_TIMER) ||
	    !(ctrl_exit_rev.clr & VM_EXIT_SAVE_VMX_PREEMPTION_TIMER))
		return;

	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + vmreadz(VM_EXIT_INSTRUCTION_LEN));

	/*
	 * Turn on PIN control and resume the guest
	 */
	GUEST_ASSERT(!vmwrite(PIN_BASED_VM_EXEC_CONTROL,
			      vmreadz(PIN_BASED_VM_EXEC_CONTROL) |
			      PIN_BASED_VMX_PREEMPTION_TIMER));

	GUEST_ASSERT(!vmwrite(VMX_PREEMPTION_TIMER_VALUE,
			      PREEMPTION_TIMER_VALUE));

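	/*
	 * Bits 4:0 of MSR_IA32_VMX_MISC give X such that the preemption timer
	 * counts down by 1 every time bit X of the TSC changes, i.e. the
	 * timer effectively runs at TSC >> vmx_pt_rate.
	 */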
	vmx_pt_rate = rdmsr(MSR_IA32_VMX_MISC) & 0x1F;

	l2_save_restore_done = 0;

	l1_vmx_pt_start = (rdtsc() >> vmx_pt_rate) << vmx_pt_rate;

	GUEST_ASSERT(!vmresume());

	l1_vmx_pt_finish = rdtsc();

	/*
	 * Ensure exit from L2 happens after L2 goes through
	 * save and restore
	 */
	GUEST_ASSERT(l2_save_restore_done);

	/*
	 * Ensure the exit from L2 is due to preemption timer expiry
	 */
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_PREEMPTION_TIMER);

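	/*
	 * Convert the armed timer value back to TSC units: the timer is
	 * expected to expire PREEMPTION_TIMER_VALUE << vmx_pt_rate TSC ticks
	 * after the start timestamps recorded by L1 and L2 respectively.
	 */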
	l1_tsc_deadline = l1_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	l2_tsc_deadline = l2_vmx_pt_start +
		(PREEMPTION_TIMER_VALUE << vmx_pt_rate);

	/*
	 * Sync with the host and pass the l1|l2 pt_expiry_finish times and
	 * tsc deadlines so that the host can verify they are as expected.
	 */
	GUEST_SYNC_ARGS(2, l1_vmx_pt_finish, l1_tsc_deadline,
		l2_vmx_pt_finish, l2_tsc_deadline);
}

void guest_code(struct vmx_pages *vmx_pages)
{
	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/*
	 * AMD currently does not implement any VMX features, so for now we
	 * just early out.
	 */
	nested_vmx_check_supported();

	if (!kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		print_skip("KVM_CAP_NESTED_STATE not supported");
		exit(KSFT_SKIP);
	}

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	vcpu_alloc_vmx(vm, &vmx_pages_gva);
	vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);

	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);
		/*
		 * If this is stage 2, verify that the VMX preemption timer
		 * expiry is as expected.
		 * From L1's perspective, verify the preemption timer hasn't
		 * expired too early.
		 * From L2's perspective, verify the preemption timer hasn't
		 * expired too late.
		 */
		if (stage == 2) {

			pr_info("Stage %d: L1 PT expiry TSC (%lu) , L1 TSC deadline (%lu)\n",
				stage, uc.args[2], uc.args[3]);

			pr_info("Stage %d: L2 PT expiry TSC (%lu) , L2 TSC deadline (%lu)\n",
				stage, uc.args[4], uc.args[5]);

			TEST_ASSERT(uc.args[2] >= uc.args[3],
				"Stage %d: L1 PT expiry TSC (%lu) < L1 TSC deadline (%lu)",
				stage, uc.args[2], uc.args[3]);

			TEST_ASSERT(uc.args[4] < uc.args[5],
				"Stage %d: L2 PT expiry TSC (%lu) > L2 TSC deadline (%lu)",
				stage, uc.args[4], uc.args[5]);
		}

		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}