linux/tools/testing/selftests/kvm/x86_64/state_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM_GET/SET_* tests
 *
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for vCPU state save/restore, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"
#include "svm_util.h"

#define VCPU_ID		5
#define L2_GUEST_STACK_SIZE 256

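/*
 * The GUEST_SYNC(n) values below form a single monotonic sequence of sync
 * points shared between L1 and L2; at every sync point the host saves the
 * full vCPU state and restores it into a brand-new VM (see main()).
 */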
void svm_l2_guest_code(void)
{
	GUEST_SYNC(4);
	/* Exit to L1 */
	vmcall();
	GUEST_SYNC(6);
	/* Done, exit to L1 and never come back.  */
	vmcall();
}

static void svm_l1_guest_code(struct svm_test_data *svm)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];
	struct vmcb *vmcb = svm->vmcb;

	GUEST_ASSERT(svm->vmcb_gpa);
	/* Prepare for L2 execution. */
	generic_svm_setup(svm, svm_l2_guest_code,
			  &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(3);
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(5);
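	/* VMMCALL is a 3-byte opcode; step over it before re-entering L2. */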
	vmcb->save.rip += 3;
	run_guest(vmcb, svm->vmcb_gpa);
	GUEST_ASSERT(vmcb->control.exit_code == SVM_EXIT_VMMCALL);
	GUEST_SYNC(7);
}

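/*
 * Once L1 has enabled VMCS shadowing (stage 8 below), the VMREAD/VMWRITE
 * instructions executed here operate on the shadow VMCS that L1 set up.
 */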
void vmx_l2_guest_code(void)
{
	GUEST_SYNC(6);

	/* Exit to L1 */
	vmcall();

	/* L1 has now set up a shadow VMCS for us.  */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(10);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(11);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(12);

	/* Done, exit to L1 and never come back.  */
	vmcall();
}

static void vmx_l1_guest_code(struct vmx_pages *vmx_pages)
{
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, vmx_l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/*
	 * Check that the launched state is preserved: VMLAUNCH is only
	 * valid on a VMCS in the "clear" state, so it must fail here and
	 * VMRESUME must be used instead.
	 */
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

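	/* VMCALL is a 3-byte opcode; advance L2's RIP past it. */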
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

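	/*
	 * Point the VMCS link pointer at the shadow VMCS and enable VMCS
	 * shadowing, so that L2's VMREAD/VMWRITE access the shadow VMCS.
	 */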
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

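	/*
	 * A shadow VMCS cannot be entered directly: VMLAUNCH and VMRESUME
	 * on it must fail.
	 */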
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

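	/*
	 * A write to the shadow VMCS must survive the host-side state
	 * save/restore performed at the next sync point.
	 */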
	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

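	/* Switch back to the real VMCS and let L2 run its shadow VMCS checks. */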
	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

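	/*
	 * L2's VMWRITEs went straight to the shadow VMCS, so its final
	 * GUEST_RIP value is visible to L1 once the shadow VMCS is made
	 * current again; entering it must still fail.
	 */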
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}

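/*
 * __flatten__ inlines every call made from guest_code() into its body,
 * keeping the guest's code self-contained.
 */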
static void __attribute__((__flatten__)) guest_code(void *arg)
{
	GUEST_SYNC(1);
	GUEST_SYNC(2);

	if (arg) {
		if (cpu_has_svm())
			svm_l1_guest_code(arg);
		else
			vmx_l1_guest_code(arg);
	}

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	vm_vaddr_t nested_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		if (nested_svm_supported())
			vcpu_alloc_svm(vm, &nested_gva);
		else if (nested_vmx_supported())
			vcpu_alloc_vmx(vm, &nested_gva);
	}

	if (!nested_gva)
		pr_info("will skip nested state checks\n");

	vcpu_args_set(vm, VCPU_ID, 1, nested_gva);

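	/*
	 * Run the guest to each successive sync point, then save all vCPU
	 * state and reload it into a freshly created VM.
	 */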
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s)\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
				  __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_FAIL("Unknown ucall %lu", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage,
			    "Stage %d: unexpected sync value from guest, got %lx",
			    stage, (ulong)uc.args[1]);

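		/*
		 * Snapshot the registers before releasing the VM; they must
		 * match exactly after the state is reloaded below.
		 */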
		state = vcpu_save_state(vm, VCPU_ID);
		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);

		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}