linux/tools/testing/selftests/kvm/x86_64/emulator_error_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for KVM_CAP_EXIT_ON_EMULATION_FAILURE capability.
 */

#define _GNU_SOURCE /* for program_invocation_short_name */

#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

#define VCPU_ID    1
#define PAGE_SIZE  4096
#define MAXPHYADDR 36

#define MEM_REGION_GVA  0x0000123456789000
#define MEM_REGION_GPA  0x0000000700000000
#define MEM_REGION_SLOT 10
#define MEM_REGION_SIZE PAGE_SIZE

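/*
 * The guest simply loads from MEM_REGION_GVA with an flds instruction.  main()
 * sabotages the guest PTE for that address (see the reserved-bit setup below),
 * so the access forces KVM to emulate the instruction; the flds is expected to
 * be rejected by the emulator, and the vCPU exits to userspace with an
 * emulation failure.  Once the host skips the instruction, the guest resumes
 * and signals completion via GUEST_DONE().
 */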
static void guest_code(void)
{
        __asm__ __volatile__("flds (%[addr])"
                             :: [addr]"r"(MEM_REGION_GVA));

        GUEST_DONE();
}

static void run_guest(struct kvm_vm *vm)
{
        int rc;

        rc = _vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
}

/*
 * Accessors to get R/M, REG, and Mod bits described in the SDM vol 2,
 * figure 2-2 "Table Interpretation of ModR/M Byte (C8H)".  Mod occupies
 * bits 7:6, REG bits 5:3, and R/M bits 2:0 of the ModR/M byte.
 */
#define GET_RM(insn_byte) (insn_byte & 0x7)
#define GET_REG(insn_byte) ((insn_byte & 0x38) >> 3)
#define GET_MOD(insn_byte) ((insn_byte & 0xc0) >> 6)

/* Ensure we are dealing with a simple 2-byte flds instruction. */
static bool is_flds(uint8_t *insn_bytes, uint8_t insn_size)
{
        return insn_size >= 2 &&
               insn_bytes[0] == 0xd9 &&
               GET_REG(insn_bytes[1]) == 0x0 &&
               GET_MOD(insn_bytes[1]) == 0x0 &&
               /* Ensure there is no SIB byte. */
               GET_RM(insn_bytes[1]) != 0x4 &&
               /* Ensure there is no displacement byte. */
               GET_RM(insn_bytes[1]) != 0x5;
}

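/*
 * Verify that the vCPU exited with an internal emulation error.  If KVM also
 * provided the faulting instruction bytes, check that they describe the
 * guest's flds and advance RIP past it so the next KVM_RUN can make progress.
 */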
static void process_exit_on_emulation_error(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct kvm_regs regs;
        uint8_t *insn_bytes;
        uint8_t insn_size;
        uint64_t flags;

        TEST_ASSERT(run->exit_reason == KVM_EXIT_INTERNAL_ERROR,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason,
                    exit_reason_str(run->exit_reason));

        TEST_ASSERT(run->emulation_failure.suberror == KVM_INTERNAL_ERROR_EMULATION,
                    "Unexpected suberror: %u",
                    run->emulation_failure.suberror);

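        /*
         * 'flags' is only consumed when ndata is at least 1; the instruction
         * bytes are only consumed when KVM sets the INSTRUCTION_BYTES flag
         * and ndata also covers insn_size/insn_bytes (the ndata >= 3 check).
         */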
        if (run->emulation_failure.ndata >= 1) {
                flags = run->emulation_failure.flags;
                if ((flags & KVM_INTERNAL_ERROR_EMULATION_FLAG_INSTRUCTION_BYTES) &&
                    run->emulation_failure.ndata >= 3) {
                        insn_size = run->emulation_failure.insn_size;
                        insn_bytes = run->emulation_failure.insn_bytes;

                        TEST_ASSERT(insn_size <= 15 && insn_size > 0,
                                    "Unexpected instruction size: %u",
                                    insn_size);

                        TEST_ASSERT(is_flds(insn_bytes, insn_size),
                                    "Unexpected instruction.  Expected 'flds' (0xd9 /0)");

                        /*
                         * If is_flds() succeeded then the instruction bytes
                         * contained an flds instruction that is 2-bytes in
                         * length (ie: no prefix, no SIB, no displacement).
                         */
                        vcpu_regs_get(vm, VCPU_ID, &regs);
                        regs.rip += 2;
                        vcpu_regs_set(vm, VCPU_ID, &regs);
                }
        }
}

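/*
 * Ucall plumbing: a GUEST_ASSERT() in the guest surfaces as UCALL_ABORT and is
 * converted into a host-side test failure, while GUEST_DONE() surfaces as
 * UCALL_DONE.
 */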
static void do_guest_assert(struct kvm_vm *vm, struct ucall *uc)
{
        TEST_FAIL("%s at %s:%ld", (const char *)uc->args[0], __FILE__,
                  uc->args[1]);
}

static void check_for_guest_assert(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct ucall uc;

        if (run->exit_reason == KVM_EXIT_IO &&
            get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
                do_guest_assert(vm, &uc);
        }
}

static void process_ucall_done(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct ucall uc;

        check_for_guest_assert(vm);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason,
                    exit_reason_str(run->exit_reason));

        TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
                    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
                    uc.cmd, UCALL_DONE);
}

static uint64_t process_ucall(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct ucall uc;

        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason,
                    exit_reason_str(run->exit_reason));

        switch (get_ucall(vm, VCPU_ID, &uc)) {
        case UCALL_SYNC:
                break;
        case UCALL_ABORT:
                do_guest_assert(vm, &uc);
                break;
        case UCALL_DONE:
                process_ucall_done(vm);
                break;
        default:
                TEST_ASSERT(false, "Unexpected ucall");
        }

        return uc.cmd;
}

int main(int argc, char *argv[])
{
        struct kvm_enable_cap emul_failure_cap = {
                .cap = KVM_CAP_EXIT_ON_EMULATION_FAILURE,
                .args[0] = 1,
        };
        struct kvm_cpuid_entry2 *entry;
        struct kvm_cpuid2 *cpuid;
        struct kvm_vm *vm;
        uint64_t gpa, pte;
        uint64_t *hva;
        int rc;

        /* Tell stdout not to buffer its content */
        setbuf(stdout, NULL);

        vm = vm_create_default(VCPU_ID, 0, guest_code);

        if (!kvm_check_cap(KVM_CAP_SMALLER_MAXPHYADDR)) {
                printf("module parameter 'allow_smaller_maxphyaddr' is not set.  Skipping test.\n");
                return 0;
        }

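        /*
         * Advertise a 36-bit guest MAXPHYADDR via CPUID.0x80000008:EAX[7:0]
         * so that physical-address bit 36 is reserved from the guest's point
         * of view.  This relies on the allow_smaller_maxphyaddr support
         * checked above.
         */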
        cpuid = kvm_get_supported_cpuid();

        entry = kvm_get_supported_cpuid_index(0x80000008, 0);
        entry->eax = (entry->eax & 0xffffff00) | MAXPHYADDR;
        set_cpuid(cpuid, entry);

        vcpu_set_cpuid(vm, VCPU_ID, cpuid);

        rc = kvm_check_cap(KVM_CAP_EXIT_ON_EMULATION_FAILURE);
        TEST_ASSERT(rc, "KVM_CAP_EXIT_ON_EMULATION_FAILURE is unavailable");
        vm_enable_cap(vm, &emul_failure_cap);

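        /* Back MEM_REGION_GPA with a dedicated memslot and map it at MEM_REGION_GVA. */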
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    MEM_REGION_GPA, MEM_REGION_SLOT,
                                    MEM_REGION_SIZE / PAGE_SIZE, 0);
        gpa = vm_phy_pages_alloc(vm, MEM_REGION_SIZE / PAGE_SIZE,
                                 MEM_REGION_GPA, MEM_REGION_SLOT);
        TEST_ASSERT(gpa == MEM_REGION_GPA, "Failed vm_phy_pages_alloc\n");
        virt_map(vm, MEM_REGION_GVA, MEM_REGION_GPA, 1);
        hva = addr_gpa2hva(vm, MEM_REGION_GPA);
        memset(hva, 0, PAGE_SIZE);
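        /*
         * Set physical-address bit 36 in the guest PTE.  With the guest's
         * MAXPHYADDR shrunk to 36, that bit is reserved from the guest's
         * perspective, so KVM ends up emulating the flds access, and the
         * resulting emulation failure is the exit under test.
         */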
        pte = vm_get_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA);
        vm_set_page_table_entry(vm, VCPU_ID, MEM_REGION_GVA, pte | (1ull << 36));

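        /*
         * First run: the guest's flds triggers the emulation failure, which is
         * validated and the instruction skipped.  Second run: the guest
         * resumes after the flds and signals completion via GUEST_DONE().
         */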
        run_guest(vm);
        process_exit_on_emulation_error(vm);
        run_guest(vm);

        TEST_ASSERT(process_ucall(vm) == UCALL_DONE, "Expected UCALL_DONE");

        kvm_vm_free(vm);

        return 0;
}