// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/x86_64/processor.c
 *
 * Copyright (C) 2018, Google LLC.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "../kvm_util_internal.h"
#include "processor.h"

#ifndef NUM_INTERRUPTS
#define NUM_INTERRUPTS 256
#endif

#define DEFAULT_CODE_SELECTOR 0x8
#define DEFAULT_DATA_SELECTOR 0x10

/* Minimum physical address used for virtual translation tables. */
#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000

vm_vaddr_t exception_handlers;

/* Virtual translation table structure declarations */
struct pageMapL4Entry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t ignored_06:1;
        uint64_t page_size:1;
        uint64_t ignored_11_08:4;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};

struct pageDirectoryPointerEntry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t ignored_06:1;
        uint64_t page_size:1;
        uint64_t ignored_11_08:4;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};

struct pageDirectoryEntry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t ignored_06:1;
        uint64_t page_size:1;
        uint64_t ignored_11_08:4;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};

struct pageTableEntry {
        uint64_t present:1;
        uint64_t writable:1;
        uint64_t user:1;
        uint64_t write_through:1;
        uint64_t cache_disable:1;
        uint64_t accessed:1;
        uint64_t dirty:1;
        uint64_t reserved_07:1;
        uint64_t global:1;
        uint64_t ignored_11_09:3;
        uint64_t address:40;
        uint64_t ignored_62_52:11;
        uint64_t execute_disable:1;
};
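
/*
 * The structures above model the four levels of the x86-64 4-level
 * (48-bit virtual, 4K page) translation hierarchy.  A canonical
 * virtual address decomposes into four 9-bit table indices plus a
 * 12-bit page offset; virt_pg_map() and addr_gva2gpa() below derive
 * their index[] arrays exactly this way:
 *
 *      index[0] = (vaddr >> 12) & 0x1ffu;      // page table
 *      index[1] = (vaddr >> 21) & 0x1ffu;      // page directory
 *      index[2] = (vaddr >> 30) & 0x1ffu;      // page directory pointer
 *      index[3] = (vaddr >> 39) & 0x1ffu;      // page map level 4
 *      offset   = vaddr & 0xfffu;              // byte within the 4K page
 */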

void regs_dump(FILE *stream, struct kvm_regs *regs,
               uint8_t indent)
{
        fprintf(stream, "%*srax: 0x%.16llx rbx: 0x%.16llx "
                "rcx: 0x%.16llx rdx: 0x%.16llx\n",
                indent, "",
                regs->rax, regs->rbx, regs->rcx, regs->rdx);
        fprintf(stream, "%*srsi: 0x%.16llx rdi: 0x%.16llx "
                "rsp: 0x%.16llx rbp: 0x%.16llx\n",
                indent, "",
                regs->rsi, regs->rdi, regs->rsp, regs->rbp);
        fprintf(stream, "%*sr8:  0x%.16llx r9:  0x%.16llx "
                "r10: 0x%.16llx r11: 0x%.16llx\n",
                indent, "",
                regs->r8, regs->r9, regs->r10, regs->r11);
        fprintf(stream, "%*sr12: 0x%.16llx r13: 0x%.16llx "
                "r14: 0x%.16llx r15: 0x%.16llx\n",
                indent, "",
                regs->r12, regs->r13, regs->r14, regs->r15);
        fprintf(stream, "%*srip: 0x%.16llx rfl: 0x%.16llx\n",
                indent, "",
                regs->rip, regs->rflags);
}

/*
 * Segment Dump
 *
 * Input Args:
 *   stream  - Output FILE stream
 *   segment - KVM segment
 *   indent  - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps the state of the KVM segment given by @segment, to the FILE stream
 * given by @stream.
 */
static void segment_dump(FILE *stream, struct kvm_segment *segment,
                         uint8_t indent)
{
        fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.8x "
                "selector: 0x%.4x type: 0x%.2x\n",
                indent, "", segment->base, segment->limit,
                segment->selector, segment->type);
        fprintf(stream, "%*spresent: 0x%.2x dpl: 0x%.2x "
                "db: 0x%.2x s: 0x%.2x l: 0x%.2x\n",
                indent, "", segment->present, segment->dpl,
                segment->db, segment->s, segment->l);
        fprintf(stream, "%*sg: 0x%.2x avl: 0x%.2x "
                "unusable: 0x%.2x padding: 0x%.2x\n",
                indent, "", segment->g, segment->avl,
                segment->unusable, segment->padding);
}

/*
 * dtable Dump
 *
 * Input Args:
 *   stream - Output FILE stream
 *   dtable - KVM dtable
 *   indent - Left margin indent amount
 *
 * Output Args: None
 *
 * Return: None
 *
 * Dumps the state of the KVM dtable given by @dtable, to the FILE stream
 * given by @stream.
 */
static void dtable_dump(FILE *stream, struct kvm_dtable *dtable,
                        uint8_t indent)
{
        fprintf(stream, "%*sbase: 0x%.16llx limit: 0x%.4x "
                "padding: 0x%.4x 0x%.4x 0x%.4x\n",
                indent, "", dtable->base, dtable->limit,
                dtable->padding[0], dtable->padding[1], dtable->padding[2]);
}

void sregs_dump(FILE *stream, struct kvm_sregs *sregs,
                uint8_t indent)
{
        unsigned int i;

        fprintf(stream, "%*scs:\n", indent, "");
        segment_dump(stream, &sregs->cs, indent + 2);
        fprintf(stream, "%*sds:\n", indent, "");
        segment_dump(stream, &sregs->ds, indent + 2);
        fprintf(stream, "%*ses:\n", indent, "");
        segment_dump(stream, &sregs->es, indent + 2);
        fprintf(stream, "%*sfs:\n", indent, "");
        segment_dump(stream, &sregs->fs, indent + 2);
        fprintf(stream, "%*sgs:\n", indent, "");
        segment_dump(stream, &sregs->gs, indent + 2);
        fprintf(stream, "%*sss:\n", indent, "");
        segment_dump(stream, &sregs->ss, indent + 2);
        fprintf(stream, "%*str:\n", indent, "");
        segment_dump(stream, &sregs->tr, indent + 2);
        fprintf(stream, "%*sldt:\n", indent, "");
        segment_dump(stream, &sregs->ldt, indent + 2);

        fprintf(stream, "%*sgdt:\n", indent, "");
        dtable_dump(stream, &sregs->gdt, indent + 2);
        fprintf(stream, "%*sidt:\n", indent, "");
        dtable_dump(stream, &sregs->idt, indent + 2);

        fprintf(stream, "%*scr0: 0x%.16llx cr2: 0x%.16llx "
                "cr3: 0x%.16llx cr4: 0x%.16llx\n",
                indent, "",
                sregs->cr0, sregs->cr2, sregs->cr3, sregs->cr4);
        fprintf(stream, "%*scr8: 0x%.16llx efer: 0x%.16llx "
                "apic_base: 0x%.16llx\n",
                indent, "",
                sregs->cr8, sregs->efer, sregs->apic_base);

        fprintf(stream, "%*sinterrupt_bitmap:\n", indent, "");
        for (i = 0; i < (KVM_NR_INTERRUPTS + 63) / 64; i++) {
                fprintf(stream, "%*s%.16llx\n", indent + 2, "",
                        sregs->interrupt_bitmap[i]);
        }
}

void virt_pgd_alloc(struct kvm_vm *vm, uint32_t pgd_memslot)
{
        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        /* If needed, create page map l4 table. */
        if (!vm->pgd_created) {
                vm_paddr_t paddr = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot);
                vm->pgd = paddr;
                vm->pgd_created = true;
        }
}

void virt_pg_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
        uint32_t pgd_memslot)
{
        uint16_t index[4];
        struct pageMapL4Entry *pml4e;

        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        TEST_ASSERT((vaddr % vm->page_size) == 0,
                "Virtual address not on page boundary,\n"
                "  vaddr: 0x%lx vm->page_size: 0x%x",
                vaddr, vm->page_size);
        TEST_ASSERT(sparsebit_is_set(vm->vpages_valid,
                (vaddr >> vm->page_shift)),
                "Invalid virtual address, vaddr: 0x%lx",
                vaddr);
        TEST_ASSERT((paddr % vm->page_size) == 0,
                "Physical address not on page boundary,\n"
                "  paddr: 0x%lx vm->page_size: 0x%x",
                paddr, vm->page_size);
        TEST_ASSERT((paddr >> vm->page_shift) <= vm->max_gfn,
                "Physical address beyond maximum supported,\n"
                "  paddr: 0x%lx vm->max_gfn: 0x%lx vm->page_size: 0x%x",
                paddr, vm->max_gfn, vm->page_size);

        index[0] = (vaddr >> 12) & 0x1ffu;
        index[1] = (vaddr >> 21) & 0x1ffu;
        index[2] = (vaddr >> 30) & 0x1ffu;
        index[3] = (vaddr >> 39) & 0x1ffu;

        /* Allocate page directory pointer table if not present. */
        pml4e = addr_gpa2hva(vm, vm->pgd);
        if (!pml4e[index[3]].present) {
                pml4e[index[3]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pml4e[index[3]].writable = true;
                pml4e[index[3]].present = true;
        }

        /* Allocate page directory table if not present. */
        struct pageDirectoryPointerEntry *pdpe;
        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].present) {
                pdpe[index[2]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pdpe[index[2]].writable = true;
                pdpe[index[2]].present = true;
        }

        /* Allocate page table if not present. */
        struct pageDirectoryEntry *pde;
        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].present) {
                pde[index[1]].address = vm_phy_page_alloc(vm,
                        KVM_GUEST_PAGE_TABLE_MIN_PADDR, pgd_memslot)
                        >> vm->page_shift;
                pde[index[1]].writable = true;
                pde[index[1]].present = true;
        }

        /* Fill in page table entry. */
        struct pageTableEntry *pte;
        pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
        pte[index[0]].address = paddr >> vm->page_shift;
        pte[index[0]].writable = true;
        pte[index[0]].present = 1;
}
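
/*
 * Example (illustrative sketch, not part of the library): map a freshly
 * allocated physical page at an arbitrary page-aligned guest virtual
 * address.  The gva value and memslot 0 below are made-up inputs for
 * the sketch.
 *
 *      vm_paddr_t gpa = vm_phy_page_alloc(vm,
 *              KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
 *      virt_pg_map(vm, 0x400000, gpa, 0);
 */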

void virt_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
        struct pageMapL4Entry *pml4e, *pml4e_start;
        struct pageDirectoryPointerEntry *pdpe, *pdpe_start;
        struct pageDirectoryEntry *pde, *pde_start;
        struct pageTableEntry *pte, *pte_start;

        if (!vm->pgd_created)
                return;

        fprintf(stream, "%*s                                          "
                "                no\n", indent, "");
        fprintf(stream, "%*s      index hvaddr         gpaddr         "
                "addr         w exec dirty\n",
                indent, "");
        pml4e_start = (struct pageMapL4Entry *) addr_gpa2hva(vm,
                vm->pgd);
        for (uint16_t n1 = 0; n1 <= 0x1ffu; n1++) {
                pml4e = &pml4e_start[n1];
                if (!pml4e->present)
                        continue;
                fprintf(stream, "%*spml4e 0x%-3zx %p 0x%-12lx 0x%-10lx %u "
                        " %u\n",
                        indent, "",
                        pml4e - pml4e_start, pml4e,
                        addr_hva2gpa(vm, pml4e), (uint64_t) pml4e->address,
                        pml4e->writable, pml4e->execute_disable);

                pdpe_start = addr_gpa2hva(vm, pml4e->address
                        * vm->page_size);
                for (uint16_t n2 = 0; n2 <= 0x1ffu; n2++) {
                        pdpe = &pdpe_start[n2];
                        if (!pdpe->present)
                                continue;
                        fprintf(stream, "%*spdpe  0x%-3zx %p 0x%-12lx 0x%-10lx "
                                "%u  %u\n",
                                indent, "",
                                pdpe - pdpe_start, pdpe,
                                addr_hva2gpa(vm, pdpe),
                                (uint64_t) pdpe->address, pdpe->writable,
                                pdpe->execute_disable);

                        pde_start = addr_gpa2hva(vm,
                                pdpe->address * vm->page_size);
                        for (uint16_t n3 = 0; n3 <= 0x1ffu; n3++) {
                                pde = &pde_start[n3];
                                if (!pde->present)
                                        continue;
                                fprintf(stream, "%*spde   0x%-3zx %p "
                                        "0x%-12lx 0x%-10lx %u  %u\n",
                                        indent, "", pde - pde_start, pde,
                                        addr_hva2gpa(vm, pde),
                                        (uint64_t) pde->address, pde->writable,
                                        pde->execute_disable);

                                pte_start = addr_gpa2hva(vm,
                                        pde->address * vm->page_size);
                                for (uint16_t n4 = 0; n4 <= 0x1ffu; n4++) {
                                        pte = &pte_start[n4];
                                        if (!pte->present)
                                                continue;
                                        fprintf(stream, "%*spte   0x%-3zx %p "
                                                "0x%-12lx 0x%-10lx %u  %u "
                                                "    %u    0x%-10lx\n",
                                                indent, "",
                                                pte - pte_start, pte,
                                                addr_hva2gpa(vm, pte),
                                                (uint64_t) pte->address,
                                                pte->writable,
                                                pte->execute_disable,
                                                pte->dirty,
                                                ((uint64_t) n1 << 27)
                                                        | ((uint64_t) n2 << 18)
                                                        | ((uint64_t) n3 << 9)
                                                        | ((uint64_t) n4));
                                }
                        }
                }
        }
}

/*
 * Set Unusable Segment
 *
 * Input Args: None
 *
 * Output Args:
 *   segp - Pointer to segment register
 *
 * Return: None
 *
 * Sets the segment register pointed to by @segp to an unusable state.
 */
static void kvm_seg_set_unusable(struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->unusable = true;
}

static void kvm_seg_fill_gdt_64bit(struct kvm_vm *vm, struct kvm_segment *segp)
{
        void *gdt = addr_gva2hva(vm, vm->gdt);
        struct desc64 *desc = gdt + (segp->selector >> 3) * 8;

        desc->limit0 = segp->limit & 0xFFFF;
        desc->base0 = segp->base & 0xFFFF;
        desc->base1 = segp->base >> 16;
        desc->type = segp->type;
        desc->s = segp->s;
        desc->dpl = segp->dpl;
        desc->p = segp->present;
        desc->limit1 = segp->limit >> 16;
        desc->avl = segp->avl;
        desc->l = segp->l;
        desc->db = segp->db;
        desc->g = segp->g;
        desc->base2 = segp->base >> 24;
        if (!segp->s)
                desc->base3 = segp->base >> 32;
}

/*
 * Set Long Mode Flat Kernel Code Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by @segp, to be a code segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_code_64bit(struct kvm_vm *vm, uint16_t selector,
        struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->selector = selector;
        segp->limit = 0xFFFFFFFFu;
        segp->s = 0x1; /* kTypeCodeData */
        segp->type = 0x08 | 0x01 | 0x02; /* kFlagCode | kFlagCodeAccessed
                                          * | kFlagCodeReadable
                                          */
        segp->g = true;
        segp->l = true;
        segp->present = 1;
        if (vm)
                kvm_seg_fill_gdt_64bit(vm, segp);
}

/*
 * Set Long Mode Flat Kernel Data Segment
 *
 * Input Args:
 *   vm - VM whose GDT is being filled, or NULL to only write segp
 *   selector - selector value
 *
 * Output Args:
 *   segp - Pointer to KVM segment
 *
 * Return: None
 *
 * Sets up the KVM segment pointed to by @segp, to be a data segment
 * with the selector value given by @selector.
 */
static void kvm_seg_set_kernel_data_64bit(struct kvm_vm *vm, uint16_t selector,
        struct kvm_segment *segp)
{
        memset(segp, 0, sizeof(*segp));
        segp->selector = selector;
        segp->limit = 0xFFFFFFFFu;
        segp->s = 0x1; /* kTypeCodeData */
        segp->type = 0x00 | 0x01 | 0x02; /* kFlagData | kFlagDataAccessed
                                          * | kFlagDataWritable
                                          */
        segp->g = true;
        segp->present = true;
        if (vm)
                kvm_seg_fill_gdt_64bit(vm, segp);
}

vm_paddr_t addr_gva2gpa(struct kvm_vm *vm, vm_vaddr_t gva)
{
        uint16_t index[4];
        struct pageMapL4Entry *pml4e;
        struct pageDirectoryPointerEntry *pdpe;
        struct pageDirectoryEntry *pde;
        struct pageTableEntry *pte;

        TEST_ASSERT(vm->mode == VM_MODE_PXXV48_4K, "Attempt to use "
                "unknown or unsupported guest mode, mode: 0x%x", vm->mode);

        index[0] = (gva >> 12) & 0x1ffu;
        index[1] = (gva >> 21) & 0x1ffu;
        index[2] = (gva >> 30) & 0x1ffu;
        index[3] = (gva >> 39) & 0x1ffu;

        if (!vm->pgd_created)
                goto unmapped_gva;
        pml4e = addr_gpa2hva(vm, vm->pgd);
        if (!pml4e[index[3]].present)
                goto unmapped_gva;

        pdpe = addr_gpa2hva(vm, pml4e[index[3]].address * vm->page_size);
        if (!pdpe[index[2]].present)
                goto unmapped_gva;

        pde = addr_gpa2hva(vm, pdpe[index[2]].address * vm->page_size);
        if (!pde[index[1]].present)
                goto unmapped_gva;

        pte = addr_gpa2hva(vm, pde[index[1]].address * vm->page_size);
        if (!pte[index[0]].present)
                goto unmapped_gva;

        return (pte[index[0]].address * vm->page_size) + (gva & 0xfffu);

unmapped_gva:
        TEST_FAIL("No mapping for vm virtual address, gva: 0x%lx", gva);
        exit(EXIT_FAILURE);
}

static void kvm_setup_gdt(struct kvm_vm *vm, struct kvm_dtable *dt, int gdt_memslot,
                          int pgd_memslot)
{
        if (!vm->gdt)
                vm->gdt = vm_vaddr_alloc(vm, getpagesize(),
                        KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

        dt->base = vm->gdt;
        dt->limit = getpagesize();
}

static void kvm_setup_tss_64bit(struct kvm_vm *vm, struct kvm_segment *segp,
                                int selector, int gdt_memslot,
                                int pgd_memslot)
{
        if (!vm->tss)
                vm->tss = vm_vaddr_alloc(vm, getpagesize(),
                        KVM_UTIL_MIN_VADDR, gdt_memslot, pgd_memslot);

        memset(segp, 0, sizeof(*segp));
        segp->base = vm->tss;
        segp->limit = 0x67;
        segp->selector = selector;
        segp->type = 0xb;
        segp->present = 1;
        kvm_seg_fill_gdt_64bit(vm, segp);
}

static void vcpu_setup(struct kvm_vm *vm, int vcpuid, int pgd_memslot, int gdt_memslot)
{
        struct kvm_sregs sregs;

        /* Set mode specific system register values. */
        vcpu_sregs_get(vm, vcpuid, &sregs);

        sregs.idt.limit = 0;

        kvm_setup_gdt(vm, &sregs.gdt, gdt_memslot, pgd_memslot);

        switch (vm->mode) {
        case VM_MODE_PXXV48_4K:
                sregs.cr0 = X86_CR0_PE | X86_CR0_NE | X86_CR0_PG;
                sregs.cr4 |= X86_CR4_PAE | X86_CR4_OSFXSR;
                sregs.efer |= (EFER_LME | EFER_LMA | EFER_NX);

                kvm_seg_set_unusable(&sregs.ldt);
                kvm_seg_set_kernel_code_64bit(vm, DEFAULT_CODE_SELECTOR, &sregs.cs);
                kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.ds);
                kvm_seg_set_kernel_data_64bit(vm, DEFAULT_DATA_SELECTOR, &sregs.es);
                kvm_setup_tss_64bit(vm, &sregs.tr, 0x18, gdt_memslot, pgd_memslot);
                break;

        default:
                TEST_FAIL("Unknown guest mode, mode: 0x%x", vm->mode);
        }

        sregs.cr3 = vm->pgd;
        vcpu_sregs_set(vm, vcpuid, &sregs);
}

void vm_vcpu_add_default(struct kvm_vm *vm, uint32_t vcpuid, void *guest_code)
{
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
        vm_vaddr_t stack_vaddr;
        stack_vaddr = vm_vaddr_alloc(vm, DEFAULT_STACK_PGS * getpagesize(),
                                     DEFAULT_GUEST_STACK_VADDR_MIN, 0, 0);

        /* Create VCPU */
        vm_vcpu_add(vm, vcpuid);
        vcpu_setup(vm, vcpuid, 0, 0);

        /* Setup guest general purpose registers */
        vcpu_regs_get(vm, vcpuid, &regs);
        regs.rflags = regs.rflags | 0x2;
        regs.rsp = stack_vaddr + (DEFAULT_STACK_PGS * getpagesize());
        regs.rip = (unsigned long) guest_code;
        vcpu_regs_set(vm, vcpuid, &regs);

        /* Setup the MP state */
        mp_state.mp_state = 0;
        vcpu_set_mp_state(vm, vcpuid, &mp_state);
}
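
/*
 * Usage note (a sketch of the common pattern, not prescriptive): most
 * tests reach this function indirectly via vm_create_default(), which
 * creates the VM and then calls vm_vcpu_add_default() with the test's
 * guest entry point.  guest_main is a hypothetical guest function here.
 *
 *      struct kvm_vm *vm = vm_create_default(0, 0, guest_main);
 *      vcpu_run(vm, 0);
 */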

/*
 * Allocate an instance of struct kvm_cpuid2
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return: A pointer to the allocated struct. The caller is responsible
 * for freeing this struct.
 *
 * Since kvm_cpuid2 uses a 0-length array to allow the size of the
 * array to be decided at allocation time, allocation is slightly
 * complicated. This function uses a reasonable default length for
 * the array and performs the appropriate allocation.
 */
static struct kvm_cpuid2 *allocate_kvm_cpuid2(void)
{
        struct kvm_cpuid2 *cpuid;
        int nent = 100;
        size_t size;

        size = sizeof(*cpuid);
        size += nent * sizeof(struct kvm_cpuid_entry2);
        cpuid = malloc(size);
        if (!cpuid) {
                perror("malloc");
                abort();
        }

        cpuid->nent = nent;

        return cpuid;
}

/*
 * KVM Supported CPUID Get
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return: The supported KVM CPUID
 *
 * Get the guest CPUID supported by KVM.
 */
struct kvm_cpuid2 *kvm_get_supported_cpuid(void)
{
        static struct kvm_cpuid2 *cpuid;
        int ret;
        int kvm_fd;

        if (cpuid)
                return cpuid;

        cpuid = allocate_kvm_cpuid2();
        kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
        if (kvm_fd < 0)
                exit(KSFT_SKIP);

        ret = ioctl(kvm_fd, KVM_GET_SUPPORTED_CPUID, cpuid);
        TEST_ASSERT(ret == 0, "KVM_GET_SUPPORTED_CPUID failed %d %d\n",
                    ret, errno);

        close(kvm_fd);
        return cpuid;
}
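
/*
 * Usage note: the returned kvm_cpuid2 is cached in a function-local
 * static, so every caller shares one instance and must not free it,
 * the allocator's comment above notwithstanding.  The typical pattern
 * is simply:
 *
 *      vcpu_set_cpuid(vm, 0, kvm_get_supported_cpuid());
 */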

/*
 * Locate a cpuid entry.
 *
 * Input Args:
 *   function: The function of the cpuid entry to find.
 *   index: The index of the cpuid entry.
 *
 * Output Args: None
 *
 * Return: A pointer to the cpuid entry. Never returns NULL.
 */
struct kvm_cpuid_entry2 *
kvm_get_supported_cpuid_index(uint32_t function, uint32_t index)
{
        struct kvm_cpuid2 *cpuid;
        struct kvm_cpuid_entry2 *entry = NULL;
        int i;

        cpuid = kvm_get_supported_cpuid();
        for (i = 0; i < cpuid->nent; i++) {
                if (cpuid->entries[i].function == function &&
                    cpuid->entries[i].index == index) {
                        entry = &cpuid->entries[i];
                        break;
                }
        }

        TEST_ASSERT(entry, "Guest CPUID entry not found: (EAX=%x, ECX=%x).",
                    function, index);
        return entry;
}

/*
 * VM VCPU CPUID Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU id
 *   cpuid - The CPUID values to set.
 *
 * Output Args: None
 *
 * Return: None
 *
 * Set the VCPU's CPUID.
 */
void vcpu_set_cpuid(struct kvm_vm *vm,
                uint32_t vcpuid, struct kvm_cpuid2 *cpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int rc;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

        rc = ioctl(vcpu->fd, KVM_SET_CPUID2, cpuid);
        TEST_ASSERT(rc == 0, "KVM_SET_CPUID2 failed, rc: %i errno: %i",
                    rc, errno);
}

/*
 * VCPU Get MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *
 * Output Args: None
 *
 * Return: On success, value of the MSR. On failure a TEST_ASSERT is produced.
 *
 * Get value of MSR for VCPU.
 */
uint64_t vcpu_get_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        struct {
                struct kvm_msrs header;
                struct kvm_msr_entry entry;
        } buffer = {};
        int r;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
        buffer.header.nmsrs = 1;
        buffer.entry.index = msr_index;
        r = ioctl(vcpu->fd, KVM_GET_MSRS, &buffer.header);
        TEST_ASSERT(r == 1, "KVM_GET_MSRS IOCTL failed,\n"
                "  rc: %i errno: %i", r, errno);

        return buffer.entry.data;
}

/*
 * _VCPU Set MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *   msr_value - New value of MSR
 *
 * Output Args: None
 *
 * Return: The result of KVM_SET_MSRS.
 *
 * Sets the value of an MSR for the given VCPU.
 */
int _vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
                  uint64_t msr_value)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        struct {
                struct kvm_msrs header;
                struct kvm_msr_entry entry;
        } buffer = {};
        int r;

        TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
        memset(&buffer, 0, sizeof(buffer));
        buffer.header.nmsrs = 1;
        buffer.entry.index = msr_index;
        buffer.entry.data = msr_value;
        r = ioctl(vcpu->fd, KVM_SET_MSRS, &buffer.header);
        return r;
}

/*
 * VCPU Set MSR
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   msr_index - Index of MSR
 *   msr_value - New value of MSR
 *
 * Output Args: None
 *
 * Return: On success, nothing. On failure a TEST_ASSERT is produced.
 *
 * Set value of MSR for VCPU.
 */
void vcpu_set_msr(struct kvm_vm *vm, uint32_t vcpuid, uint64_t msr_index,
        uint64_t msr_value)
{
        int r;

        r = _vcpu_set_msr(vm, vcpuid, msr_index, msr_value);
        TEST_ASSERT(r == 1, "KVM_SET_MSRS IOCTL failed,\n"
                "  rc: %i errno: %i", r, errno);
}
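
/*
 * Example (sketch): round-trip an MSR through a vCPU.  MSR_IA32_TSC is
 * chosen purely for illustration; any MSR index works the same way.
 *
 *      vcpu_set_msr(vm, 0, MSR_IA32_TSC, 0);
 *      uint64_t val = vcpu_get_msr(vm, 0, MSR_IA32_TSC);
 *
 * Tests that expect KVM to reject a write use _vcpu_set_msr() and check
 * the return value instead of asserting on it.
 */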

void vcpu_args_set(struct kvm_vm *vm, uint32_t vcpuid, unsigned int num, ...)
{
        va_list ap;
        struct kvm_regs regs;

        TEST_ASSERT(num >= 1 && num <= 6, "Unsupported number of args,\n"
                    "  num: %u\n",
                    num);

        va_start(ap, num);
        vcpu_regs_get(vm, vcpuid, &regs);

        if (num >= 1)
                regs.rdi = va_arg(ap, uint64_t);

        if (num >= 2)
                regs.rsi = va_arg(ap, uint64_t);

        if (num >= 3)
                regs.rdx = va_arg(ap, uint64_t);

        if (num >= 4)
                regs.rcx = va_arg(ap, uint64_t);

        if (num >= 5)
                regs.r8 = va_arg(ap, uint64_t);

        if (num >= 6)
                regs.r9 = va_arg(ap, uint64_t);

        vcpu_regs_set(vm, vcpuid, &regs);
        va_end(ap);
}
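
/*
 * The values land in the System V AMD64 argument registers (rdi, rsi,
 * rdx, rcx, r8, r9), so a guest function declared with matching
 * parameters receives them directly.  A sketch, with a hypothetical
 * guest function:
 *
 *      // guest side: void guest_main(uint64_t a, uint64_t b)
 *      vcpu_args_set(vm, 0, 2, 1ul, 2ul);
 */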

void vcpu_dump(FILE *stream, struct kvm_vm *vm, uint32_t vcpuid, uint8_t indent)
{
        struct kvm_regs regs;
        struct kvm_sregs sregs;

        fprintf(stream, "%*scpuid: %u\n", indent, "", vcpuid);

        fprintf(stream, "%*sregs:\n", indent + 2, "");
        vcpu_regs_get(vm, vcpuid, &regs);
        regs_dump(stream, &regs, indent + 4);

        fprintf(stream, "%*ssregs:\n", indent + 2, "");
        vcpu_sregs_get(vm, vcpuid, &sregs);
        sregs_dump(stream, &sregs, indent + 4);
}

struct kvm_x86_state {
        struct kvm_vcpu_events events;
        struct kvm_mp_state mp_state;
        struct kvm_regs regs;
        struct kvm_xsave xsave;
        struct kvm_xcrs xcrs;
        struct kvm_sregs sregs;
        struct kvm_debugregs debugregs;
        union {
                struct kvm_nested_state nested;
                char nested_[16384];
        };
        struct kvm_msrs msrs;
};

static int kvm_get_num_msrs_fd(int kvm_fd)
{
        struct kvm_msr_list nmsrs;
        int r;

        nmsrs.nmsrs = 0;
        r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, &nmsrs);
        TEST_ASSERT(r == -1 && errno == E2BIG, "Unexpected result from KVM_GET_MSR_INDEX_LIST probe, r: %i",
                r);

        return nmsrs.nmsrs;
}

static int kvm_get_num_msrs(struct kvm_vm *vm)
{
        return kvm_get_num_msrs_fd(vm->kvm_fd);
}

struct kvm_msr_list *kvm_get_msr_index_list(void)
{
        struct kvm_msr_list *list;
        int nmsrs, r, kvm_fd;

        kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
        if (kvm_fd < 0)
                exit(KSFT_SKIP);

        nmsrs = kvm_get_num_msrs_fd(kvm_fd);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
        list->nmsrs = nmsrs;
        r = ioctl(kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
        close(kvm_fd);

        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
                r);

        return list;
}

struct kvm_x86_state *vcpu_save_state(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        struct kvm_msr_list *list;
        struct kvm_x86_state *state;
        int nmsrs, r, i;
        static int nested_size = -1;

        if (nested_size == -1) {
                nested_size = kvm_check_cap(KVM_CAP_NESTED_STATE);
                TEST_ASSERT(nested_size <= sizeof(state->nested_),
                            "Nested state size too big, %i > %zi",
                            nested_size, sizeof(state->nested_));
        }

        /*
         * When KVM exits to userspace with KVM_EXIT_IO, KVM guarantees
         * guest state is consistent only after userspace re-enters the
         * kernel with KVM_RUN.  Complete IO prior to migrating state
         * to a new VM.
         */
        vcpu_run_complete_io(vm, vcpuid);

        nmsrs = kvm_get_num_msrs(vm);
        list = malloc(sizeof(*list) + nmsrs * sizeof(list->indices[0]));
        list->nmsrs = nmsrs;
        r = ioctl(vm->kvm_fd, KVM_GET_MSR_INDEX_LIST, list);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MSR_INDEX_LIST, r: %i",
                r);

        state = malloc(sizeof(*state) + nmsrs * sizeof(state->msrs.entries[0]));
        r = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, &state->events);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_VCPU_EVENTS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_GET_MP_STATE, &state->mp_state);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_MP_STATE, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_GET_REGS, &state->regs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_REGS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_GET_XSAVE, &state->xsave);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XSAVE, r: %i",
                r);

        if (kvm_check_cap(KVM_CAP_XCRS)) {
                r = ioctl(vcpu->fd, KVM_GET_XCRS, &state->xcrs);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_XCRS, r: %i",
                            r);
        }

        r = ioctl(vcpu->fd, KVM_GET_SREGS, &state->sregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_SREGS, r: %i",
                r);

        if (nested_size) {
                state->nested.size = sizeof(state->nested_);
                r = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, &state->nested);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_NESTED_STATE, r: %i",
                        r);
                TEST_ASSERT(state->nested.size <= nested_size,
                        "Nested state size too big, %i (KVM_CHECK_CAP gave %i)",
                        state->nested.size, nested_size);
        } else
                state->nested.size = 0;

        state->msrs.nmsrs = nmsrs;
        for (i = 0; i < nmsrs; i++)
                state->msrs.entries[i].index = list->indices[i];
        r = ioctl(vcpu->fd, KVM_GET_MSRS, &state->msrs);
        TEST_ASSERT(r == nmsrs, "Unexpected result from KVM_GET_MSRS, r: %i (failed MSR was 0x%x)",
                r, r == nmsrs ? -1 : list->indices[r]);

        r = ioctl(vcpu->fd, KVM_GET_DEBUGREGS, &state->debugregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_GET_DEBUGREGS, r: %i",
                r);

        free(list);
        return state;
}

void vcpu_load_state(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_x86_state *state)
{
        struct vcpu *vcpu = vcpu_find(vm, vcpuid);
        int r;

        r = ioctl(vcpu->fd, KVM_SET_XSAVE, &state->xsave);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XSAVE, r: %i",
                r);

        if (kvm_check_cap(KVM_CAP_XCRS)) {
                r = ioctl(vcpu->fd, KVM_SET_XCRS, &state->xcrs);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_XCRS, r: %i",
                            r);
        }

        r = ioctl(vcpu->fd, KVM_SET_SREGS, &state->sregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_SREGS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_MSRS, &state->msrs);
        TEST_ASSERT(r == state->msrs.nmsrs, "Unexpected result from KVM_SET_MSRS, r: %i (failed at %x)",
                r, r == state->msrs.nmsrs ? -1 : state->msrs.entries[r].index);

        r = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, &state->events);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_VCPU_EVENTS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_MP_STATE, &state->mp_state);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_MP_STATE, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_DEBUGREGS, &state->debugregs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_DEBUGREGS, r: %i",
                r);

        r = ioctl(vcpu->fd, KVM_SET_REGS, &state->regs);
        TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_REGS, r: %i",
                r);

        if (state->nested.size) {
                r = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, &state->nested);
                TEST_ASSERT(r == 0, "Unexpected result from KVM_SET_NESTED_STATE, r: %i",
                        r);
        }
}
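
/*
 * Example (a sketch of the save/restore pattern used by tests such as
 * state_test): snapshot a vCPU, tear the VM down, rebuild it, and
 * resume from the snapshot.
 *
 *      struct kvm_x86_state *state = vcpu_save_state(vm, 0);
 *      kvm_vm_release(vm);
 *      kvm_vm_restart(vm, O_RDWR);
 *      vm_vcpu_add(vm, 0);
 *      vcpu_set_cpuid(vm, 0, kvm_get_supported_cpuid());
 *      vcpu_load_state(vm, 0, state);
 *      free(state);
 */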

bool is_intel_cpu(void)
{
        int eax, ebx, ecx, edx;
        const uint32_t *chunk;
        const int leaf = 0;

        __asm__ __volatile__(
                "cpuid"
                : /* output */ "=a"(eax), "=b"(ebx),
                  "=c"(ecx), "=d"(edx)
                : /* input */ "0"(leaf), "2"(0));

        chunk = (const uint32_t *)("GenuineIntel");
        return (ebx == chunk[0] && edx == chunk[1] && ecx == chunk[2]);
}

uint32_t kvm_get_cpuid_max_basic(void)
{
        return kvm_get_supported_cpuid_entry(0)->eax;
}

uint32_t kvm_get_cpuid_max_extended(void)
{
        return kvm_get_supported_cpuid_entry(0x80000000)->eax;
}

void kvm_get_cpu_address_width(unsigned int *pa_bits, unsigned int *va_bits)
{
        struct kvm_cpuid_entry2 *entry;
        bool pae;

        /* SDM 4.1.4 */
        if (kvm_get_cpuid_max_extended() < 0x80000008) {
                pae = kvm_get_supported_cpuid_entry(1)->edx & (1 << 6);
                *pa_bits = pae ? 36 : 32;
                *va_bits = 32;
        } else {
                entry = kvm_get_supported_cpuid_entry(0x80000008);
                *pa_bits = entry->eax & 0xff;
                *va_bits = (entry->eax >> 8) & 0xff;
        }
}

struct idt_entry {
        uint16_t offset0;
        uint16_t selector;
        uint16_t ist : 3;
        uint16_t : 5;
        uint16_t type : 4;
        uint16_t : 1;
        uint16_t dpl : 2;
        uint16_t p : 1;
        uint16_t offset1;
        uint32_t offset2;
        uint32_t reserved;
};

static void set_idt_entry(struct kvm_vm *vm, int vector, unsigned long addr,
                          int dpl, unsigned short selector)
{
        struct idt_entry *base =
                (struct idt_entry *)addr_gva2hva(vm, vm->idt);
        struct idt_entry *e = &base[vector];

        memset(e, 0, sizeof(*e));
        e->offset0 = addr;
        e->selector = selector;
        e->ist = 0;
        e->type = 14;
        e->dpl = dpl;
        e->p = 1;
        e->offset1 = addr >> 16;
        e->offset2 = addr >> 32;
}

void kvm_exit_unexpected_vector(uint32_t value)
{
        outl(UNEXPECTED_VECTOR_PORT, value);
}

void route_exception(struct ex_regs *regs)
{
        typedef void (*handler)(struct ex_regs *);
        handler *handlers = (handler *)exception_handlers;

        if (handlers && handlers[regs->vector]) {
                handlers[regs->vector](regs);
                return;
        }

        kvm_exit_unexpected_vector(regs->vector);
}

void vm_init_descriptor_tables(struct kvm_vm *vm)
{
        extern void *idt_handlers;
        int i;

        vm->idt = vm_vaddr_alloc(vm, getpagesize(), 0x2000, 0, 0);
        vm->handlers = vm_vaddr_alloc(vm, 256 * sizeof(void *), 0x2000, 0, 0);
        /* Handlers have the same address in both address spaces. */
        for (i = 0; i < NUM_INTERRUPTS; i++)
                set_idt_entry(vm, i, (unsigned long)(&idt_handlers)[i], 0,
                        DEFAULT_CODE_SELECTOR);
}

void vcpu_init_descriptor_tables(struct kvm_vm *vm, uint32_t vcpuid)
{
        struct kvm_sregs sregs;

        vcpu_sregs_get(vm, vcpuid, &sregs);
        sregs.idt.base = vm->idt;
        sregs.idt.limit = NUM_INTERRUPTS * sizeof(struct idt_entry) - 1;
        sregs.gdt.base = vm->gdt;
        sregs.gdt.limit = getpagesize() - 1;
        kvm_seg_set_kernel_data_64bit(NULL, DEFAULT_DATA_SELECTOR, &sregs.gs);
        vcpu_sregs_set(vm, vcpuid, &sregs);
        *(vm_vaddr_t *)addr_gva2hva(vm, (vm_vaddr_t)(&exception_handlers)) = vm->handlers;
}

void vm_handle_exception(struct kvm_vm *vm, int vector,
                         void (*handler)(struct ex_regs *))
{
        vm_vaddr_t *handlers = (vm_vaddr_t *)addr_gva2hva(vm, vm->handlers);

        handlers[vector] = (vm_vaddr_t)handler;
}
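
/*
 * Example (sketch): wiring a guest exception handler end to end.
 * handle_ud is a hypothetical guest-side function and UD_VECTOR the
 * #UD vector number from the selftests headers.
 *
 *      // guest side: void handle_ud(struct ex_regs *regs)
 *      vm_init_descriptor_tables(vm);
 *      vcpu_init_descriptor_tables(vm, 0);
 *      vm_handle_exception(vm, UD_VECTOR, handle_ud);
 *
 * Vectors without a registered handler fall through route_exception()
 * to kvm_exit_unexpected_vector(), which the host side reports via
 * assert_on_unhandled_exception() below.
 */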

void assert_on_unhandled_exception(struct kvm_vm *vm, uint32_t vcpuid)
{
        if (vcpu_state(vm, vcpuid)->exit_reason == KVM_EXIT_IO
                && vcpu_state(vm, vcpuid)->io.port == UNEXPECTED_VECTOR_PORT
                && vcpu_state(vm, vcpuid)->io.size == 4) {
                /* Grab pointer to io data */
                uint32_t *data = (void *)vcpu_state(vm, vcpuid)
                        + vcpu_state(vm, vcpuid)->io.data_offset;

                TEST_ASSERT(false,
                            "Unexpected vectored event in guest (vector:0x%x)",
                            *data);
        }
}

bool set_cpuid(struct kvm_cpuid2 *cpuid,
               struct kvm_cpuid_entry2 *ent)
{
        int i;

        for (i = 0; i < cpuid->nent; i++) {
                struct kvm_cpuid_entry2 *cur = &cpuid->entries[i];

                if (cur->function != ent->function || cur->index != ent->index)
                        continue;

                memcpy(cur, ent, sizeof(struct kvm_cpuid_entry2));
                return true;
        }

        return false;
}
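
/*
 * Example (sketch): override one CPUID leaf before handing the list to
 * a vCPU.  The leaf and bit below are illustrative only.
 *
 *      struct kvm_cpuid2 *cpuid = kvm_get_supported_cpuid();
 *      struct kvm_cpuid_entry2 ent = *kvm_get_supported_cpuid_index(1, 0);
 *      ent.ecx &= ~(1u << 31);         // e.g. clear the hypervisor bit
 *      TEST_ASSERT(set_cpuid(cpuid, &ent), "CPUID leaf 1 not found");
 *      vcpu_set_cpuid(vm, 0, cpuid);
 */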

uint64_t kvm_hypercall(uint64_t nr, uint64_t a0, uint64_t a1, uint64_t a2,
                       uint64_t a3)
{
        uint64_t r;

        asm volatile("vmcall"
                     : "=a"(r)
                     : "a"(nr), "b"(a0), "c"(a1), "d"(a2), "S"(a3));
        return r;
}