linux/tools/testing/selftests/kvm/lib/s390x/processor.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include "processor.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR		0x180000

#define PAGES_PER_REGION 4

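/*
 * On s390x, a region or segment table has 2048 eight-byte entries
 * (16 KiB, i.e. PAGES_PER_REGION pages of 4 KiB), while a page table
 * has 256 entries and fits into a single 4 KiB page.
 */
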
void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
{
	vm_paddr_t paddr;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	if (vm->pgd_created)
		return;

	paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
	/* 0xff sets the REGION_ENTRY_INVALID bit in every entry */
	memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

	vm->pgd = paddr;
	vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
{
	uint64_t taddr;

	taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
				   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
	/* Only invalidate the pages that were actually allocated */
	memset(addr_gpa2hva(vm, taddr), 0xff,
	       (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

	return (taddr & REGION_ENTRY_ORIGIN)
		| (((4 - ri) << 2) & REGION_ENTRY_TYPE)
		| ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

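/*
 * Worked example for virt_alloc_region() (assuming the usual s390
 * definitions REGION_ENTRY_TYPE == 0x0c and REGION_ENTRY_LENGTH == 0x03
 * from processor.h): for ri == 1, the returned region-first table entry
 * carries type (4 - 1) << 2 = 0xc and length PAGES_PER_REGION - 1 = 3
 * (a four-page table); for ri == 4, both fields are 0, i.e. a segment
 * table entry pointing to a one-page page table.
 */
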
/*
 * VM Virtual Page Map
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM Virtual Address
 *   gpa - VM Physical Address
 *   memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a virtual translation for the page
 * starting at gva to the page starting at gpa.
 */
void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
		 uint32_t memslot)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT((gva % vm->page_size) == 0,
		"Virtual address not on page boundary,\n"
		"  vaddr: 0x%lx vm->page_size: 0x%x",
		gva, vm->page_size);
	TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
		(gva >> vm->page_shift)),
		"Invalid virtual address, vaddr: 0x%lx",
		gva);
	TEST_ASSERT((gpa % vm->page_size) == 0,
		"Physical address not on page boundary,\n"
		"  paddr: 0x%lx vm->page_size: 0x%x",
		gpa, vm->page_size);
	TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
		"Physical address beyond maximum supported,\n"
		"  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		gpa, vm->max_gfn, vm->page_size);

	/* Walk through region and segment tables */
	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		if (entry[idx] & REGION_ENTRY_INVALID)
			entry[idx] = virt_alloc_region(vm, ri, memslot);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	/* Fill in page table entry */
	idx = (gva >> 12) & 0x0ffu;		/* page index */
	if (!(entry[idx] & PAGE_INVALID))
		fprintf(stderr,
			"WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
	entry[idx] = gpa;
}

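/*
 * Example usage (a hypothetical sketch, assuming memslot 0 and that the
 * target gpa lies within an existing memory region): set up the
 * top-level table and identity-map one page before starting the guest:
 *
 *	virt_pgd_alloc(vm, 0);
 *	virt_pg_map(vm, 0x1000000, 0x1000000, 0);
 */
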
/*
 * Address Guest Virtual to Guest Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Translates the VM virtual address given by gva to a VM physical
 * address by walking the region, segment and page tables within the
 * VM given by vm, and returns the resulting VM physical address.
 * A TEST_ASSERT failure occurs if no mapping for the given
 * VM virtual address exists.
 */
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
	int ri, idx;
	uint64_t *entry;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	entry = addr_gpa2hva(vm, vm->pgd);
	for (ri = 1; ri <= 4; ri++) {
		idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
		TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
			    "No region mapping for vm virtual address 0x%lx",
			    gva);
		entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
	}

	idx = (gva >> 12) & 0x0ffu;		/* page index */

	TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
		    "No page mapping for vm virtual address 0x%lx", gva);

	return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

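/*
 * The table walks above slice a 64-bit virtual address into 11-bit
 * indexes for the four table levels, plus an 8-bit page index and a
 * 12-bit byte offset. E.g. for gva = 0x0000000001234567:
 *   region-first  index = gva >> 53 & 0x7ff = 0
 *   region-second index = gva >> 42 & 0x7ff = 0
 *   region-third  index = gva >> 31 & 0x7ff = 0
 *   segment       index = gva >> 20 & 0x7ff = 0x12
 *   page          index = gva >> 12 & 0xff  = 0x34
 *   byte offset         = gva & 0xfff       = 0x567
 */
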
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			   uint64_t ptea_start)
{
	uint64_t *pte, ptea;

	/* A page table has 256 eight-byte entries */
	for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
		pte = addr_gpa2hva(vm, ptea);
		if (*pte & PAGE_INVALID)
			continue;
		fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
			indent, "", ptea, *pte);
	}
}

static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
			     uint64_t reg_tab_addr)
{
	uint64_t addr, *entry;

	/* A region/segment table has 2048 eight-byte entries */
	for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
		entry = addr_gpa2hva(vm, addr);
		if (*entry & REGION_ENTRY_INVALID)
			continue;
		fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
			indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
			addr, *entry);
		if (*entry & REGION_ENTRY_TYPE) {
			virt_dump_region(stream, vm, indent + 2,
					 *entry & REGION_ENTRY_ORIGIN);
		} else {
			virt_dump_ptes(stream, vm, indent + 2,
				       *entry & REGION_ENTRY_ORIGIN);
		}
	}
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	if (!vm->pgd_created)
		return;

	virt_dump_region(stream, vm, indent, vm->pgd);
}

/*
 * Create a VM with reasonable defaults
 *
 * Input Args:
 *   vcpuid - The id of the single VCPU to add to the VM.
 *   extra_mem_pages - The number of extra pages of guest memory to add
 *                     (this determines how much extra room is reserved
 *                     in mem slot 0 for the page tables)
 *   guest_code - The vCPU's entry point
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 */
struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
				 void *guest_code)
{
	/*
	 * The number of additional pages required for the page tables is:
	 * 1 * n / 256 + 4 * (n / 256) / 2048 + 4 * (n / 256) / 2048^2 + ...
	 * which is definitely smaller than (n / 256) * 2.
	 */
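	/*
	 * Worked example: for n = 2^20 extra pages, the page tables need
	 * n / 256 = 4096 pages, the segment tables above them need
	 * 4 * 4096 / 2048 = 8 pages, and the higher levels a few more,
	 * all well under the reserved bound of 2 * 4096 = 8192 pages.
	 */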
	uint64_t extra_pg_pages = extra_mem_pages / 256 * 2;
	struct kvm_vm *vm;

	vm = vm_create(VM_MODE_DEFAULT,
		       DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);

	kvm_vm_elf_load(vm, program_invocation_name, 0, 0);
	vm_vcpu_add_default(vm, vcpuid, guest_code);

	return vm;
}

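/*
 * Example usage (a hypothetical sketch, with guest_main a guest function
 * defined by the test): create a one-vcpu VM with 16 extra pages of
 * guest memory, run it, and tear it down:
 *
 *	struct kvm_vm *vm = vm_create_default(0, 16, guest_main);
 *	vcpu_run(vm, 0);
 *	kvm_vm_free(vm);
 */
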
/*
 * Adds a vCPU with reasonable defaults (i.e. a stack and initial PSW)
 *
 * Input Args:
 *   vcpuid - The id of the VCPU to add to the VM.
 *   guest_code - The vCPU's entry point
 */
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
	size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
	uint64_t stack_vaddr;
	struct kvm_regs regs;
	struct kvm_sregs sregs;
	struct kvm_run *run;

	TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
		    vm->page_size);

	stack_vaddr = vm_vaddr_alloc(vm, stack_size,
				     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

	vm_vcpu_add(vm, vcpuid);

	/* Setup guest registers */
	vcpu_regs_get(vm, vcpuid, &regs);
	/* Point r15 at the top of the stack, minus the 160-byte save area */
	regs.gprs[15] = stack_vaddr + stack_size - 160;
	vcpu_regs_set(vm, vcpuid, &regs);

	vcpu_sregs_get(vm, vcpuid, &sregs);
	sregs.crs[0] |= 0x00040000;		/* Enable floating point regs */
	sregs.crs[1] = vm->pgd | 0xf;		/* Primary region table */
	vcpu_sregs_set(vm, vcpuid, &sregs);

	run = vcpu_state(vm, vcpuid);
	run->psw_mask = 0x0400000180000000ULL;	/* DAT enabled + 64 bit mode */
	run->psw_addr = (uintptr_t)guest_code;
}

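/*
 * Breakdown of the magic values used above: in the PSW mask, bit 5
 * (0x0400000000000000) enables DAT, and bits 31-32 (0x0000000180000000)
 * select the 64-bit extended addressing mode. Likewise, the 0xf OR'ed
 * into CR1 marks the ASCE as a region-first table designation
 * (type 0xc) of maximum length (3, i.e. four pages).
 */
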
void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
	/* Look up the requested vcpu instead of assuming the list head */
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
		indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
}