linux/tools/testing/selftests/kvm/x86_64/state_test.c
/*
 * KVM_GET/SET_* tests
 *
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 *
 * Tests for vCPU state save/restore, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID		5

static bool have_nested_state;

void l2_guest_code(void)
{
	GUEST_SYNC(6);

	/* Exit to L1 */
	vmcall();

	/* L1 has now set up a shadow VMCS for us.  */
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_SYNC(10);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
	GUEST_SYNC(11);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
	GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
	GUEST_SYNC(12);

	/* Done, exit to L1 and never come back.  */
	vmcall();
}

void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
	unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

	GUEST_ASSERT(vmx_pages->vmcs_gpa);
	GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
	GUEST_SYNC(3);
	GUEST_ASSERT(load_vmcs(vmx_pages));
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	GUEST_SYNC(4);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

	prepare_vmcs(vmx_pages, l2_guest_code,
		     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

	GUEST_SYNC(5);
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(!vmlaunch());
	GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	/* Check that the launched state is preserved.  */
	GUEST_ASSERT(vmlaunch());

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_SYNC(7);
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

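	/* Skip over the vmcall that got us here so L2 resumes after it.  */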
	vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

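	/*
	 * Enable VMCS shadowing and point the VMCS link pointer at the
	 * shadow VMCS, so that VMREAD/VMWRITE executed in L2 are satisfied
	 * from the shadow VMCS instead of causing VM-exits.
	 */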
	vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
	vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

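	/*
	 * Make the shadow VMCS current.  VM entry must fail while a shadow
	 * VMCS is current, both before and after the vCPU state is saved
	 * and restored at sync point 8.
	 */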
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmlaunch());
	GUEST_SYNC(8);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());

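	/* A write to the (current) shadow VMCS must survive save/restore.  */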
	vmwrite(GUEST_RIP, 0xc0ffee);
	GUEST_SYNC(9);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

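	/*
	 * Go back to the ordinary VMCS and resume L2, which reads and
	 * rewrites GUEST_RIP through the shadow VMCS (sync points 10-12).
	 */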
	GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
	GUEST_ASSERT(!vmresume());
	GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

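	/*
	 * Reload the shadow VMCS: L2's VMWRITEs must be visible here, and
	 * the shadow VMCS must still refuse to launch or resume, before and
	 * after one more save/restore cycle at sync point 13.
	 */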
	GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
	GUEST_SYNC(13);
	GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
	GUEST_ASSERT(vmlaunch());
	GUEST_ASSERT(vmresume());
}

void guest_code(struct vmx_pages *vmx_pages)
{
	GUEST_SYNC(1);
	GUEST_SYNC(2);

	if (vmx_pages)
		l1_guest_code(vmx_pages);

	GUEST_DONE();
}

int main(int argc, char *argv[])
{
	struct vmx_pages *vmx_pages = NULL;
	vm_vaddr_t vmx_pages_gva = 0;

	struct kvm_regs regs1, regs2;
	struct kvm_vm *vm;
	struct kvm_run *run;
	struct kvm_x86_state *state;
	struct ucall uc;
	int stage;

	struct kvm_cpuid_entry2 *entry = kvm_get_supported_cpuid_entry(1);

	/* Create VM */
	vm = vm_create_default(VCPU_ID, 0, guest_code);
	vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
	run = vcpu_state(vm, VCPU_ID);

	vcpu_regs_get(vm, VCPU_ID, &regs1);

	if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
		vmx_pages = vcpu_alloc_vmx(vm, &vmx_pages_gva);
		vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
	} else {
		printf("will skip nested state checks\n");
		vcpu_args_set(vm, VCPU_ID, 1, 0);
	}

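	/*
	 * Run the guest to each sync point, save the vCPU state (including
	 * nested state when available), release the VM, then restart it and
	 * restore the state into the new incarnation.
	 */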
	for (stage = 1;; stage++) {
		_vcpu_run(vm, VCPU_ID);
		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
			    "Stage %d: unexpected exit reason: %u (%s),\n",
			    stage, run->exit_reason,
			    exit_reason_str(run->exit_reason));

		memset(&regs1, 0, sizeof(regs1));
		vcpu_regs_get(vm, VCPU_ID, &regs1);
		switch (get_ucall(vm, VCPU_ID, &uc)) {
		case UCALL_ABORT:
			TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
				    __FILE__, uc.args[1]);
			/* NOT REACHED */
		case UCALL_SYNC:
			break;
		case UCALL_DONE:
			goto done;
		default:
			TEST_ASSERT(false, "Unknown ucall 0x%x.", uc.cmd);
		}

		/* UCALL_SYNC is handled here.  */
		TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
			    uc.args[1] == stage, "Stage %d: Unexpected register values vmexit, got %lx",
			    stage, (ulong)uc.args[1]);

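		/* Save the vCPU state, including nested state, then release the VM.  */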
		state = vcpu_save_state(vm, VCPU_ID);
		kvm_vm_release(vm);

		/* Restore state in a new VM.  */
		kvm_vm_restart(vm, O_RDWR);
		vm_vcpu_add(vm, VCPU_ID, 0, 0);
		vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
		vcpu_load_state(vm, VCPU_ID, state);
		run = vcpu_state(vm, VCPU_ID);
		free(state);

		memset(&regs2, 0, sizeof(regs2));
		vcpu_regs_get(vm, VCPU_ID, &regs2);
		TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
			    "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
			    (ulong) regs2.rdi, (ulong) regs2.rsi);
	}

done:
	kvm_vm_free(vm);
}