linux/tools/testing/selftests/kvm/lib/s390x/processor.c
// SPDX-License-Identifier: GPL-2.0-only
/*
 * KVM selftest s390x library code - CPU-related functions (page tables...)
 *
 * Copyright (C) 2019, Red Hat, Inc.
 */
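
/*
 * Illustrative use from a test (a sketch, not code from this tree:
 * vm_create_default() lives in the common selftest code, and the
 * addresses below are made up for the example):
 *
 *      struct kvm_vm *vm = vm_create_default(0, 0, guest_code);
 *
 *      virt_pg_map(vm, 0x1000000, 0x1000000, 0);
 */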

#include "processor.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"

#define KVM_GUEST_PAGE_TABLE_MIN_PADDR          0x180000

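/*
 * A region or segment table holds up to 2048 entries of 8 bytes each,
 * i.e. 16 KiB, which is four 4 KiB pages.
 */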
#define PAGES_PER_REGION 4

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t memslot)
{
        vm_paddr_t paddr;

        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);

        if (vm->pgd_created)
                return;

        paddr = vm_phy_pages_alloc(vm, PAGES_PER_REGION,
                                   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
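        /* All-ones entries have the INVALID bit set, i.e. no mappings yet */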
        memset(addr_gpa2hva(vm, paddr), 0xff, PAGES_PER_REGION * vm->page_size);

        vm->pgd = paddr;
        vm->pgd_created = true;
}

/*
 * Allocate 4 pages for a region/segment table (ri < 4), or one page for
 * a page table (ri == 4). Returns a suitable region/segment table entry
 * which points to the freshly allocated pages.
 */
static uint64_t virt_alloc_region(struct kvm_vm *vm, int ri, uint32_t memslot)
{
        uint64_t taddr;

        taddr = vm_phy_pages_alloc(vm, ri < 4 ? PAGES_PER_REGION : 1,
                                   KVM_GUEST_PAGE_TABLE_MIN_PADDR, memslot);
        /* Only poison the page(s) that were actually allocated */
        memset(addr_gpa2hva(vm, taddr), 0xff,
               (ri < 4 ? PAGES_PER_REGION : 1) * vm->page_size);

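        /*
         * The table-type bits in an entry identify the table that holds
         * it: 3 = region-first, 2 = region-second, 1 = region-third,
         * 0 = segment table, hence "4 - ri".  The length field gives the
         * size of the designated (next lower) table in 4 KiB units,
         * minus one; page-table entries carry neither type nor length.
         */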
        return (taddr & REGION_ENTRY_ORIGIN)
                | (((4 - ri) << 2) & REGION_ENTRY_TYPE)
                | ((ri < 4 ? (PAGES_PER_REGION - 1) : 0) & REGION_ENTRY_LENGTH);
}

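/*
 * Map one 4 KiB page at gva to gpa.  A 64-bit virtual address decomposes
 * into four 11-bit table indices (region-first, region-second,
 * region-third, segment), an 8-bit page index and a 12-bit byte offset
 * (11 + 11 + 11 + 11 + 8 + 12 = 64), which is what the shifts below
 * extract.
 */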
void virt_pg_map(struct kvm_vm *vm, uint64_t gva, uint64_t gpa,
                 uint32_t memslot)
{
        int ri, idx;
        uint64_t *entry;

        TEST_ASSERT((gva % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x",
                gva, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (gva >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx",
                gva);
        TEST_ASSERT((gpa % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x",
                gpa, vm->page_size);
        TEST_ASSERT((gpa >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                gpa, vm->max_gfn, vm->page_size);

        /* Walk through region and segment tables */
        entry = addr_gpa2hva(vm, vm->pgd);
        for (ri = 1; ri <= 4; ri++) {
                idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
                if (entry[idx] & REGION_ENTRY_INVALID)
                        entry[idx] = virt_alloc_region(vm, ri, memslot);
                entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
        }

        /* Fill in page table entry */
        idx = (gva >> 12) & 0x0ffu;             /* page index */
        if (!(entry[idx] & PAGE_INVALID))
                fprintf(stderr,
                        "WARNING: PTE for gpa=0x%"PRIx64" already set!\n", gpa);
        entry[idx] = gpa;
}

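/*
 * Translate a guest virtual address into the guest physical address it
 * maps to, by walking the DAT tables in software and asserting that a
 * valid mapping exists at every level.
 */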
vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        int ri, idx;
        uint64_t *entry;

        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);

        entry = addr_gpa2hva(vm, vm->pgd);
        for (ri = 1; ri <= 4; ri++) {
                idx = (gva >> (64 - 11 * ri)) & 0x7ffu;
                TEST_ASSERT(!(entry[idx] & REGION_ENTRY_INVALID),
                            "No region mapping for vm virtual address 0x%lx",
                            gva);
                entry = addr_gpa2hva(vm, entry[idx] & REGION_ENTRY_ORIGIN);
        }

        idx = (gva >> 12) & 0x0ffu;             /* page index */

        TEST_ASSERT(!(entry[idx] & PAGE_INVALID),
                    "No page mapping for vm virtual address 0x%lx", gva);

        return (entry[idx] & ~0xffful) + (gva & 0xffful);
}

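/*
 * Print all valid entries of the page table that starts at guest
 * physical address ptea_start (256 entries of 8 bytes each).
 */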
static void virt_dump_ptes(FILE *stream, struct kvm_vm *vm, uint8_t indent,
                           uint64_t ptea_start)
{
        uint64_t *pte, ptea;

        for (ptea = ptea_start; ptea < ptea_start + 0x100 * 8; ptea += 8) {
                pte = addr_gpa2hva(vm, ptea);
                if (*pte & PAGE_INVALID)
                        continue;
                fprintf(stream, "%*spte @ 0x%lx: 0x%016lx\n",
                        indent, "", ptea, *pte);
        }
}

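/*
 * Recursively print all valid entries of the region/segment table that
 * starts at guest physical address reg_tab_addr (2048 entries of 8
 * bytes each).  Entries with a non-zero table type designate another
 * region/segment table; segment entries (type 0) designate page tables.
 */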
static void virt_dump_region(FILE *stream, struct kvm_vm *vm, uint8_t indent,
                             uint64_t reg_tab_addr)
{
        uint64_t addr, *entry;

        for (addr = reg_tab_addr; addr < reg_tab_addr + 0x400 * 8; addr += 8) {
                entry = addr_gpa2hva(vm, addr);
                if (*entry & REGION_ENTRY_INVALID)
                        continue;
                fprintf(stream, "%*srt%lde @ 0x%lx: 0x%016lx\n",
                        indent, "", 4 - ((*entry & REGION_ENTRY_TYPE) >> 2),
                        addr, *entry);
                if (*entry & REGION_ENTRY_TYPE) {
                        virt_dump_region(stream, vm, indent + 2,
                                         *entry & REGION_ENTRY_ORIGIN);
                } else {
                        virt_dump_ptes(stream, vm, indent + 2,
                                       *entry & REGION_ENTRY_ORIGIN);
                }
        }
}

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        if (!vm->pgd_created)
                return;

        virt_dump_region(stream, vm, indent, vm->pgd);
}

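/*
 * Create a vcpu and give it minimal run state: a stack in r15, the page
 * tables built above as the primary address space, and a PSW that starts
 * execution at guest_code with DAT enabled in 64-bit mode.
 */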
void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
        size_t stack_size = DEFAULT_STACK_PGS * getpagesize();
        uint64_t stack_vaddr;
        struct kvm_regs regs;
        struct kvm_sregs sregs;
        struct kvm_run *run;

        TEST_ASSERT(vm->page_size == 4096, "Unsupported page size: 0x%x",
                    vm->page_size);

        stack_vaddr = vm_vaddr_alloc(vm, stack_size,
                                     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

        vm_vcpu_add(vm, vcpuid);

        /* Setup guest registers */
        vcpu_regs_get(vm, vcpuid, &regs);
        /*
         * Set up r15 as the stack pointer, leaving room for the 160 byte
         * register save area the s390x ELF ABI requires below it.
         */
        regs.gprs[15] = stack_vaddr + stack_size - 160;
        vcpu_regs_set(vm, vcpuid, &regs);

        vcpu_sregs_get(vm, vcpuid, &sregs);
        sregs.crs[0] |= 0x00040000;             /* Enable floating point regs */
        sregs.crs[1] = vm->pgd | 0xf;           /* Primary region-first table */
        vcpu_sregs_set(vm, vcpuid, &sregs);

        run = vcpu_state(vm, vcpuid);
        run->psw_mask = 0x0400000180000000ULL;  /* DAT enabled + 64 bit mode */
        run->psw_addr = (uintptr_t)guest_code;
}

void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
        va_list ap;
        struct kvm_regs regs;
        int i;

        TEST_ASSERT(num >= 1 && num <= 5, "Unsupported number of args,\n"
                    "  num: %u\n",
                    num);

        va_start(ap, num);
        vcpu_regs_get(vm, vcpuid, &regs);

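        /* Per the s390x ELF ABI, integer arguments are passed in r2-r6 */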
        for (i = 0; i < num; i++)
                regs.gprs[i + 2] = va_arg(ap, uint64_t);

        vcpu_regs_set(vm, vcpuid, &regs);
        va_end(ap);
}

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);

        if (!vcpu)
                return;

        fprintf(stream, "%*spstate: psw: 0x%.16llx:0x%.16llx\n",
                indent, "", vcpu->state->psw_mask, vcpu->state->psw_addr);
}

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
        /* Intentionally empty: no guest exception reporting on s390x yet */
}