linux/tools/testing/selftests/kvm/x86_64/state_test.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM_GET/SET_* tests
 *
 * Copyright (C) 2018, Red Hat, Inc.
 *
 * Tests for vCPU state save/restore, including nested guest state.
 */
#define _GNU_SOURCE /* for program_invocation_short_name */
#include <fcntl.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>

#include "test_util.h"

#include "kvm_util.h"
#include "processor.h"
#include "vmx.h"

#define VCPU_ID         5

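/*
 * L2 guest code.  Syncs with the host at stage 6, vmcalls back to L1,
 * and is later resumed to check that RIP values L1 writes through the
 * shadow VMCS are visible via vmread, and that L2's own vmwrites are in
 * turn visible to L1.
 */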
void l2_guest_code(void)
{
        GUEST_SYNC(6);

        /* Exit to L1 */
        vmcall();

        /* L1 has now set up a shadow VMCS for us.  */
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
        GUEST_SYNC(10);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);
        GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0fffee));
        GUEST_SYNC(11);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0fffee);
        GUEST_ASSERT(!vmwrite(GUEST_RIP, 0xc0ffffee));
        GUEST_SYNC(12);

        /* Done, exit to L1 and never come back.  */
        vmcall();
}

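/*
 * L1 guest code.  Enters VMX operation, launches L2 and exercises the
 * shadow VMCS from both L1 and L2.  Every GUEST_SYNC() marks a point at
 * which the host saves and restores the full vCPU state, so the
 * assertions also verify that nested state survives each save/restore
 * cycle.
 */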
void l1_guest_code(struct vmx_pages *vmx_pages)
{
#define L2_GUEST_STACK_SIZE 64
        unsigned long l2_guest_stack[L2_GUEST_STACK_SIZE];

        GUEST_ASSERT(vmx_pages->vmcs_gpa);
        GUEST_ASSERT(prepare_for_vmx_operation(vmx_pages));
        GUEST_SYNC(3);
        GUEST_ASSERT(load_vmcs(vmx_pages));
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

        GUEST_SYNC(4);
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);

        prepare_vmcs(vmx_pages, l2_guest_code,
                     &l2_guest_stack[L2_GUEST_STACK_SIZE]);

        GUEST_SYNC(5);
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
        GUEST_ASSERT(!vmlaunch());
        GUEST_ASSERT(vmptrstz() == vmx_pages->vmcs_gpa);
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

        /* Check that the launched state is preserved.  */
        GUEST_ASSERT(vmlaunch());

        GUEST_ASSERT(!vmresume());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

        GUEST_SYNC(7);
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

        GUEST_ASSERT(!vmresume());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

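        /* Skip over L2's 3-byte vmcall instruction on the next resume. */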
        vmwrite(GUEST_RIP, vmreadz(GUEST_RIP) + 3);

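        /*
         * Enable VMCS shadowing and point the VMCS link pointer at the
         * shadow VMCS.  Once the shadow VMCS is made current with vmptrld,
         * VM entries (vmlaunch/vmresume) must fail, but vmread/vmwrite
         * operate on the shadow VMCS directly.
         */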
        vmwrite(SECONDARY_VM_EXEC_CONTROL, SECONDARY_EXEC_SHADOW_VMCS);
        vmwrite(VMCS_LINK_POINTER, vmx_pages->shadow_vmcs_gpa);

        GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
        GUEST_ASSERT(vmlaunch());
        GUEST_SYNC(8);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());

        vmwrite(GUEST_RIP, 0xc0ffee);
        GUEST_SYNC(9);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffee);

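        /*
         * Make the ordinary VMCS current again and resume L2, which will
         * see the RIP that was written through the shadow VMCS.
         */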
        GUEST_ASSERT(!vmptrld(vmx_pages->vmcs_gpa));
        GUEST_ASSERT(!vmresume());
        GUEST_ASSERT(vmreadz(VM_EXIT_REASON) == EXIT_REASON_VMCALL);

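        /*
         * Reload the shadow VMCS and verify that L2's vmwrite of GUEST_RIP
         * (0xc0ffffee) is visible to L1, and that VM entries from the
         * shadow VMCS still fail.
         */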
        GUEST_ASSERT(!vmptrld(vmx_pages->shadow_vmcs_gpa));
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
        GUEST_SYNC(13);
        GUEST_ASSERT(vmreadz(GUEST_RIP) == 0xc0ffffee);
        GUEST_ASSERT(vmlaunch());
        GUEST_ASSERT(vmresume());
}

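/*
 * Top-level guest entry point: run the nested checks only when the host
 * passed in a vmx_pages pointer, i.e. when nested state is supported.
 */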
void guest_code(struct vmx_pages *vmx_pages)
{
        GUEST_SYNC(1);
        GUEST_SYNC(2);

        if (vmx_pages)
                l1_guest_code(vmx_pages);

        GUEST_DONE();
}

int main(int argc, char *argv[])
{
        vm_vaddr_t vmx_pages_gva = 0;

        struct kvm_regs regs1, regs2;
        struct kvm_vm *vm;
        struct kvm_run *run;
        struct kvm_x86_state *state;
        struct ucall uc;
        int stage;

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);

        vcpu_regs_get(vm, VCPU_ID, &regs1);

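        /* Only exercise nested (VMX) state when KVM can save/restore it. */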
        if (kvm_check_cap(KVM_CAP_NESTED_STATE)) {
                vcpu_alloc_vmx(vm, &vmx_pages_gva);
                vcpu_args_set(vm, VCPU_ID, 1, vmx_pages_gva);
        } else {
                printf("will skip nested state checks\n");
                vcpu_args_set(vm, VCPU_ID, 1, 0);
        }

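        /*
         * Run the guest stage by stage.  After every UCALL_SYNC, save the
         * vCPU state, release the VM, restart it with a fresh vCPU and
         * restore the state; the guest's assertions must keep passing
         * after each such "migration".
         */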
        for (stage = 1;; stage++) {
                _vcpu_run(vm, VCPU_ID);
                TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                            "Stage %d: unexpected exit reason: %u (%s)\n",
                            stage, run->exit_reason,
                            exit_reason_str(run->exit_reason));

                switch (get_ucall(vm, VCPU_ID, &uc)) {
                case UCALL_ABORT:
                        TEST_ASSERT(false, "%s at %s:%ld", (const char *)uc.args[0],
                                    __FILE__, uc.args[1]);
                        /* NOT REACHED */
                case UCALL_SYNC:
                        break;
                case UCALL_DONE:
                        goto done;
                default:
                        TEST_ASSERT(false, "Unknown ucall 0x%lx.", uc.cmd);
                }

                /* UCALL_SYNC is handled here.  */
                TEST_ASSERT(!strcmp((const char *)uc.args[0], "hello") &&
                            uc.args[1] == stage, "Unexpected register values vmexit #%d, got %lx",
                            stage, (ulong)uc.args[1]);

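                /*
                 * Save the full vCPU state (including nested state when
                 * supported) plus a copy of the GPRs for the post-restore
                 * comparison below.
                 */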
                state = vcpu_save_state(vm, VCPU_ID);
                memset(&regs1, 0, sizeof(regs1));
                vcpu_regs_get(vm, VCPU_ID, &regs1);

                kvm_vm_release(vm);

                /* Restore state in a new VM.  */
                kvm_vm_restart(vm, O_RDWR);
                vm_vcpu_add(vm, VCPU_ID);
                vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
                vcpu_load_state(vm, VCPU_ID, state);
                run = vcpu_state(vm, VCPU_ID);
                free(state);

                memset(&regs2, 0, sizeof(regs2));
                vcpu_regs_get(vm, VCPU_ID, &regs2);
                TEST_ASSERT(!memcmp(&regs1, &regs2, sizeof(regs2)),
                            "Unexpected register values after vcpu_load_state; rdi: %lx rsi: %lx",
                            (ulong) regs2.rdi, (ulong) regs2.rsi);
        }

done:
        kvm_vm_free(vm);
}