linux/tools/testing/selftests/kvm/lib/aarch64/processor.c
// SPDX-License-Identifier: GPL-2.0
/*
 * AArch64 code
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#include <linux/compiler.h>

#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR          0x180000
#define DEFAULT_ARM64_GUEST_STACK_VADDR_MIN     0xac0000

static uint64_t page_align(struct kvm_vm *vm, uint64_t v)
{
        /*
         * Round v up to the next vm->page_size boundary. Note the
         * "- 1": adding a full page_size would push an already
         * aligned value up by one extra page.
         */
        return (v + vm->page_size - 1) & ~(vm->page_size - 1);
}

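/*
 * Translation-table index helpers. With 2^page_shift-byte pages and
 * 8-byte descriptors, each table level resolves (page_shift - 3) bits
 * of the VA, and the top level absorbs whatever remains of va_bits.
 * For example, with 4K pages, 48-bit VAs and 4 levels (page_shift ==
 * 12), the PTE index is VA[20:12], PMD VA[29:21], PUD VA[38:30] and
 * PGD VA[47:39].
 */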
static uint64_t pgd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->va_bits - shift)) - 1;

        return (gva >> shift) & mask;
}

static uint64_t pud_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = 2 * (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

        TEST_ASSERT(vm->pgtable_levels == 4,
                "Mode %d does not have 4 page table levels", vm->mode);

        return (gva >> shift) & mask;
}

static uint64_t pmd_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        unsigned int shift = (vm->page_shift - 3) + vm->page_shift;
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

        TEST_ASSERT(vm->pgtable_levels >= 3,
                "Mode %d does not have >= 3 page table levels", vm->mode);

        return (gva >> shift) & mask;
}

static uint64_t pte_index(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint64_t mask = (1UL << (vm->page_shift - 3)) - 1;

        return (gva >> vm->page_shift) & mask;
}

static uint64_t pte_addr(struct kvm_vm *vm, uint64_t entry)
{
        /* Extract the output address, dropping the attribute bits. */
        uint64_t mask = ((1UL << (vm->va_bits - vm->page_shift)) - 1) << vm->page_shift;

        return entry & mask;
}

static uint64_t ptrs_per_pgd(struct kvm_vm *vm)
{
        unsigned int shift = (vm->pgtable_levels - 1) * (vm->page_shift - 3) + vm->page_shift;

        return 1 << (vm->va_bits - shift);
}

static uint64_t __maybe_unused ptrs_per_pte(struct kvm_vm *vm)
{
        return 1 << (vm->page_shift - 3);
}

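/*
 * Allocate the guest's top-level translation table on first use. The
 * table holds ptrs_per_pgd() eight-byte descriptors, rounded up to a
 * whole page for the physical allocation.
 */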
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
        if (!vm->pgd_created) {
                vm_paddr_t paddr = vm_phy_pages_alloc(vm,
                        page_align(vm, ptrs_per_pgd(vm) * 8) / vm->page_size,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);

                vm->pgd = paddr;
                vm->pgd_created = true;
        }
}

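/*
 * Stage-1 descriptor encoding used below (VMSAv8-A): bits[1:0] = 0b11
 * mark a valid table descriptor at the upper levels and a valid page
 * descriptor at the final level; AttrIndx (bits[4:2]) indexes into
 * MAIR_EL1; bit[10] is the Access Flag, set up front so the guest
 * never takes an Access flag fault.
 */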
void _virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                  uint32_t pgd_memslot, uint64_t flags)
{
        uint8_t attr_idx = flags & 7;
        uint64_t *ptep;

        TEST_ASSERT((vaddr % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x", vaddr, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (vaddr >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx", vaddr);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x", paddr, vm->page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, vaddr) * 8;
        if (!*ptep) {
                *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                *ptep |= 3;
        }

        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, vaddr) * 8;
                if (!*ptep) {
                        *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                        *ptep |= 3;
                }
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, vaddr) * 8;
                if (!*ptep) {
                        *ptep = vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                        *ptep |= 3;
                }
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, vaddr) * 8;
                break;
        default:
                TEST_FAIL("Page table levels must be 2, 3, or 4");
        }

        *ptep = paddr | 3;
        *ptep |= (attr_idx << 2) | (1 << 10) /* Access Flag */;
}

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
                 uint32_t pgd_memslot)
{
        uint64_t attr_idx = 4; /* NORMAL (See DEFAULT_MAIR_EL1) */

        _virt_pg_map(vm, vaddr, paddr, pgd_memslot, attr_idx);
}

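/*
 * Example (a sketch, not from any particular test): to give the guest
 * an extra identity-mapped page in memslot 0, a test could do
 *
 *      vm_paddr_t paddr = vm_phy_page_alloc(vm,
 *                      KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 *      virt_pg_map(vm, paddr, paddr, 0);
 *
 * after which the guest sees the page at gva == gpa with NORMAL
 * memory attributes.
 */
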
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint64_t *ptep;

        if (!vm->pgd_created)
                goto unmapped_gva;

        ptep = addr_gpa2hva(vm, vm->pgd) + pgd_index(vm, gva) * 8;
        if (!*ptep)
                goto unmapped_gva;

        switch (vm->pgtable_levels) {
        case 4:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pud_index(vm, gva) * 8;
                if (!*ptep)
                        goto unmapped_gva;
                /* fall through */
        case 3:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pmd_index(vm, gva) * 8;
                if (!*ptep)
                        goto unmapped_gva;
                /* fall through */
        case 2:
                ptep = addr_gpa2hva(vm, pte_addr(vm, *ptep)) + pte_index(vm, gva) * 8;
                if (!*ptep)
                        goto unmapped_gva;
                break;
        default:
                TEST_FAIL("Page table levels must be 2, 3, or 4");
        }

        return pte_addr(vm, *ptep) + (gva & (vm->page_size - 1));

unmapped_gva:
        TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
        exit(1);
}

static void pte_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent, uint64_t page, int level)
{
#ifdef DEBUG
        static const char * const type[] = { "", "pud", "pmd", "pte" };
        uint64_t pte, *ptep;

        if (level == 4)
                return;

        for (pte = page; pte < page + ptrs_per_pte(vm) * 8; pte += 8) {
                ptep = addr_gpa2hva(vm, pte);
                if (!*ptep)
                        continue;
                fprintf(stream, "%*s%s: %lx: %lx at %p\n", indent, "", type[level], pte, *ptep, ptep);
                pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level + 1);
        }
#endif
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        int level = 4 - (vm->pgtable_levels - 1);
        uint64_t pgd, *ptep;

        if (!vm->pgd_created)
                return;

        for (pgd = vm->pgd; pgd < vm->pgd + ptrs_per_pgd(vm) * 8; pgd += 8) {
                ptep = addr_gpa2hva(vm, pgd);
                if (!*ptep)
                        continue;
                fprintf(stream, "%*spgd: %lx: %lx at %p\n", indent, "", pgd, *ptep, ptep);
                pte_dump(stream, vm, indent + 1, pte_addr(vm, *ptep), level);
        }
}

void aarch64_vcpu_setup(struct kvm_vm *vm, int vcpuid, struct kvm_vcpu_init *init)
{
        struct kvm_vcpu_init default_init = { .target = -1, };
        uint64_t sctlr_el1, tcr_el1;

        if (!init)
                init = &default_init;

        if (init->target == -1) {
                struct kvm_vcpu_init preferred;

                vm_ioctl(vm, KVM_ARM_PREFERRED_TARGET, &preferred);
                init->target = preferred.target;
        }

        vcpu_ioctl(vm, vcpuid, KVM_ARM_VCPU_INIT, init);

        /*
         * Enable FP/ASIMD to avoid trapping when accessing Q0-Q15
         * registers, which the variable argument list macros do.
         */
        set_reg(vm, vcpuid, ARM64_SYS_REG(CPACR_EL1), 3 << 20);

        get_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), &sctlr_el1);
        get_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), &tcr_el1);

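        /*
         * TCR_EL1 field placement used below (architectural, not
         * specific to this file): TG0 is bits[15:14] (0 = 4KB,
         * 1 = 64KB granule), IPS is bits[34:32] (2 = 40, 5 = 48,
         * 6 = 52 PA bits), IRGN0/ORGN0/SH0 sit in bits[9:8], [11:10]
         * and [13:12], and T0SZ (bits[5:0]) is 64 minus the VA width.
         */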
        switch (vm->mode) {
        case VM_MODE_P52V48_4K:
                TEST_FAIL("AArch64 does not support 4K sized pages "
                          "with 52-bit physical address ranges");
        case VM_MODE_PXXV48_4K:
                TEST_FAIL("AArch64 does not support 4K sized pages "
                          "with ANY-bit physical address ranges");
        case VM_MODE_P52V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                tcr_el1 |= 6ul << 32; /* IPS = 52 bits */
                break;
        case VM_MODE_P48V48_4K:
                tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
                tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
                break;
        case VM_MODE_P48V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                tcr_el1 |= 5ul << 32; /* IPS = 48 bits */
                break;
        case VM_MODE_P40V48_4K:
                tcr_el1 |= 0ul << 14; /* TG0 = 4KB */
                tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
                break;
        case VM_MODE_P40V48_64K:
                tcr_el1 |= 1ul << 14; /* TG0 = 64KB */
                tcr_el1 |= 2ul << 32; /* IPS = 40 bits */
                break;
        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
        }

        sctlr_el1 |= (1 << 0) | (1 << 2) | (1 << 12) /* M | C | I */;
        /* TCR_EL1 |= IRGN0:WBWA | ORGN0:WBWA | SH0:Inner-Shareable */
        tcr_el1 |= (1 << 8) | (1 << 10) | (3 << 12);
        tcr_el1 |= (64 - vm->va_bits) /* T0SZ */;

        set_reg(vm, vcpuid, ARM64_SYS_REG(SCTLR_EL1), sctlr_el1);
        set_reg(vm, vcpuid, ARM64_SYS_REG(TCR_EL1), tcr_el1);
        set_reg(vm, vcpuid, ARM64_SYS_REG(MAIR_EL1), DEFAULT_MAIR_EL1);
        set_reg(vm, vcpuid, ARM64_SYS_REG(TTBR0_EL1), vm->pgd);
}

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
        uint64_t pstate, pc;

        get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pstate), &pstate);
        get_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), &pc);

        fprintf(stream, "%*spstate: 0x%.16lx pc: 0x%.16lx\n",
                indent, "", pstate, pc);
}

void aarch64_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid,
                              struct kvm_vcpu_init *init, void *guest_code)
{
        /*
         * With 4K pages the stack gets DEFAULT_STACK_PGS pages; with
         * larger granules a single page is already plenty.
         */
        size_t stack_size = vm->page_size == 4096 ?
                                        DEFAULT_STACK_PGS * vm->page_size :
                                        vm->page_size;
        uint64_t stack_vaddr = vm_vaddr_alloc(vm, stack_size,
                                        DEFAULT_ARM64_GUEST_STACK_VADDR_MIN, 0, 0);

        vm_vcpu_add(vm, vcpuid);
        aarch64_vcpu_setup(vm, vcpuid, init);

        set_reg(vm, vcpuid, ARM64_CORE_REG(sp_el1), stack_vaddr + stack_size);
        set_reg(vm, vcpuid, ARM64_CORE_REG(regs.pc), (uint64_t)guest_code);
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
        aarch64_vcpu_add_default(vm, vcpuid, NULL, guest_code);
}

void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
        va_list ap;
        int i;

        TEST_ASSERT(num >= 1 && num <= 8, "Unsupported number of args,\n"
                    "  num: %u\n", num);

        va_start(ap, num);

        for (i = 0; i < num; i++) {
                set_reg(vm, vcpuid, ARM64_CORE_REG(regs.regs[i]),
                        va_arg(ap, uint64_t));
        }

        va_end(ap);
}
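
/*
 * Example (a sketch, not part of this file): a hypothetical guest
 * entry point declared as
 *
 *      static void guest_code(uint64_t arg0, uint64_t arg1);
 *
 * receives its arguments in x0 and x1 per the AArch64 procedure call
 * standard, so a test would call
 *
 *      vcpu_args_set(vm, vcpuid, 2, val0, val1);
 *
 * before running the vCPU.
 */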

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
        /*
         * Deliberately empty: this library does not yet install guest
         * exception handlers on AArch64, so there is nothing to check.
         */
}