linux/tools/testing/selftests/kvm/x86_64/mmu_role_test.c
// SPDX-License-Identifier: GPL-2.0

#include "kvm_util.h"
#include "processor.h"

#define VCPU_ID                 1

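/* Arbitrary 1gb-aligned GPA (4gb) that is never backed by a memslot. */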
#define MMIO_GPA        0x100000000ull

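/*
 * Read the MMIO GPA twice.  The first read is expected to exit to userspace
 * as MMIO; the second read, done after userspace has changed CPUID, is
 * expected to take a reserved #PF that is consumed by guest_pf_handler().
 * Reaching the GUEST_ASSERT(0) means the expected #PF never arrived.
 */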
static void guest_code(void)
{
        (void)READ_ONCE(*((uint64_t *)MMIO_GPA));
        (void)READ_ONCE(*((uint64_t *)MMIO_GPA));

        GUEST_ASSERT(0);
}

static void guest_pf_handler(struct ex_regs *regs)
{
        /* PFEC == RSVD | PRESENT (read, kernel). */
        GUEST_ASSERT(regs->error_code == 0x9);
        GUEST_DONE();
}

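/*
 * @cpuid_reg points into the cached kvm_get_supported_cpuid() entries;
 * writing @evil_cpuid_val and re-pushing CPUID is what turns the guest's
 * existing 1gb MMIO translation into a reserved/illegal one.
 */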
static void mmu_role_test(u32 *cpuid_reg, u32 evil_cpuid_val)
{
        u32 good_cpuid_val = *cpuid_reg;
        struct kvm_run *run;
        struct kvm_vm *vm;
        uint64_t cmd;
        int r;

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code);
        run = vcpu_state(vm, VCPU_ID);

        /* Map a 1gb page without a backing memslot. */
        __virt_pg_map(vm, MMIO_GPA, MMIO_GPA, X86_PAGE_SIZE_1G);

        r = _vcpu_run(vm, VCPU_ID);

        /* Guest access to the 1gb page should trigger MMIO. */
        TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);
        TEST_ASSERT(run->exit_reason == KVM_EXIT_MMIO,
                    "Unexpected exit reason: %u (%s), expected MMIO exit (1gb page w/o memslot)\n",
                    run->exit_reason, exit_reason_str(run->exit_reason));

        TEST_ASSERT(run->mmio.len == 8, "Unexpected exit mmio size = %u", run->mmio.len);

        TEST_ASSERT(run->mmio.phys_addr == MMIO_GPA,
                    "Unexpected exit mmio address = 0x%llx", run->mmio.phys_addr);

        /*
         * Effect the CPUID change for the guest and re-enter the guest.  Its
         * access should now #PF due to the PAGE_SIZE bit being reserved or
         * the resulting GPA being invalid.  Note, kvm_get_supported_cpuid()
         * returns the struct that contains the entry being modified.  Eww.
         */
        *cpuid_reg = evil_cpuid_val;
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

        /*
         * Add a dummy memslot to coerce KVM into bumping the MMIO generation.
         * KVM does not "officially" support mucking with CPUID after KVM_RUN,
         * and will incorrectly reuse MMIO SPTEs.  Don't delete the memslot!
         * KVM x86 zaps all shadow pages on memslot deletion.
         */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    MMIO_GPA << 1, 10, 1, 0);

        /* Set up a #PF handler to eat the RSVD #PF and signal all done! */
        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);
        vm_install_exception_handler(vm, PF_VECTOR, guest_pf_handler);

        r = _vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(r == 0, "vcpu_run failed: %d\n", r);

        cmd = get_ucall(vm, VCPU_ID, NULL);
        TEST_ASSERT(cmd == UCALL_DONE,
                    "Unexpected guest exit, exit_reason=%s, ucall.cmd = %lu\n",
                    exit_reason_str(run->exit_reason), cmd);

        /*
         * Restore the happy CPUID value for the next test.  Yes, changes are
         * indeed persistent across VM destruction.
         */
        *cpuid_reg = good_cpuid_val;

        kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
        struct kvm_cpuid_entry2 *entry;
        int opt;

        /*
         * All tests are opt-in because TDP doesn't play nice with reserved #PF
         * in the GVA->GPA translation.  The hardware page walker doesn't let
         * software change GBPAGES or MAXPHYADDR, and KVM doesn't manually walk
         * the GVA on fault for performance reasons.
         */
        bool do_gbpages = false;
        bool do_maxphyaddr = false;

        setbuf(stdout, NULL);

        while ((opt = getopt(argc, argv, "gm")) != -1) {
                switch (opt) {
                case 'g':
                        do_gbpages = true;
                        break;
                case 'm':
                        do_maxphyaddr = true;
                        break;
                case 'h':
                default:
                        printf("usage: %s [-g (GBPAGES)] [-m (MAXPHYADDR)]\n", argv[0]);
                        break;
                }
        }

        if (!do_gbpages && !do_maxphyaddr) {
                print_skip("No sub-tests selected");
                return 0;
        }

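        /*
         * Both sub-tests map MMIO_GPA with a 1gb page, so GBPAGES support is
         * a hard requirement regardless of which test(s) were selected.
         */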
        entry = kvm_get_supported_cpuid_entry(0x80000001);
        if (!(entry->edx & CPUID_GBPAGES)) {
                print_skip("1gb hugepages not supported");
                return 0;
        }

        if (do_gbpages) {
                pr_info("Test MMIO after toggling CPUID.GBPAGES\n\n");
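                /*
                 * Clearing GBPAGES makes the PAGE_SIZE bit in the guest's 1gb
                 * PDPTE reserved, i.e. the second access should get a RSVD #PF.
                 */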
                mmu_role_test(&entry->edx, entry->edx & ~CPUID_GBPAGES);
        }

        if (do_maxphyaddr) {
                pr_info("Test MMIO after changing CPUID.MAXPHYADDR\n\n");
                entry = kvm_get_supported_cpuid_entry(0x80000008);
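                /*
                 * Shrinking MAXPHYADDR (CPUID 0x80000008.EAX[7:0]) to 32 bits
                 * makes the 4gb MMIO_GPA an illegal, reserved GPA.
                 */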
                mmu_role_test(&entry->eax, (entry->eax & ~0xff) | 0x20);
        }

        return 0;
}