/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 *
 * This work is licensed under the terms of the GNU GPL, version 2.
 */

#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/kernel.h>

#define KVM_UTIL_PGS_PER_HUGEPG 512
#define KVM_UTIL_MIN_PFN	2

/* Aligns x up to the next boundary of size, which must be a power of 2. */
static void *align(void *x, size_t size)
{
	size_t mask = size - 1;

	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %lu", size);
	return (void *) (((size_t) x + mask) & ~mask);
}

/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap. On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
		    "  rc: %i errno: %i", ret, errno);

	close(kvm_fd);

	return ret;
}
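
/*
 * Example (illustrative sketch, not part of the library): tests typically
 * probe an optional capability up front and skip when it is absent. The
 * capability chosen here is arbitrary.
 *
 *	if (!kvm_check_cap(KVM_CAP_SYNC_MMU)) {
 *		fprintf(stderr, "KVM_CAP_SYNC_MMU not available, skipping\n");
 *		exit(KSFT_SKIP);
 *	}
 */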

/*
 * VM Enable Capability
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return: On success, 0. On failure a TEST_ASSERT failure is produced.
 *
 * Enables a capability (KVM_CAP_*) on the VM.
 */
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
{
	int ret;

	ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
		    "  rc: %i errno: %i", ret, errno);

	return ret;
}

static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		fprintf(stderr, "immediate_exit not available, skipping test\n");
		exit(KSFT_SKIP);
	}

	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		    "rc: %i errno: %i", vm->fd, errno);
}

const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
};
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	       "Missing new mode strings?");

/*
 * VM Create
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   phy_pages - Physical memory pages
 *   perm - permission
 *   type - VM type (passed to the KVM_CREATE_VM ioctl)
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to an opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * When phy_pages is non-zero, a memory region of phy_pages physical pages
 * is created and mapped starting at guest physical address 0.
 */
struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages,
			  int perm, unsigned long type)
{
	struct kvm_vm *vm;

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	vm->mode = mode;
	vm->type = type;
	vm_open(vm, perm, type);

	/* Setup mode specific traits. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		vm->pa_bits = 52;
		vm->va_bits = 48;
		vm->page_size = 0x1000;
		vm->page_shift = 12;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		vm->pa_bits = 52;
		vm->va_bits = 48;
		vm->page_size = 0x10000;
		vm->page_shift = 16;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		vm->pa_bits = 48;
		vm->va_bits = 48;
		vm->page_size = 0x1000;
		vm->page_shift = 12;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		vm->pa_bits = 48;
		vm->va_bits = 48;
		vm->page_size = 0x10000;
		vm->page_shift = 16;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		vm->pa_bits = 40;
		vm->va_bits = 48;
		vm->page_size = 0x1000;
		vm->page_shift = 12;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		vm->pa_bits = 40;
		vm->va_bits = 48;
		vm->page_size = 0x10000;
		vm->page_shift = 16;
		break;
	default:
		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
	}

	/* Limit to VA-bit canonical virtual addresses. */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Limit physical addresses to PA-bits. */
	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	/* Allocate and setup memory for the guest. */
	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, phy_pages, 0);

	return vm;
}

struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	return _vm_create(mode, phy_pages, perm, 0);
}
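
/*
 * Example (illustrative sketch, not part of the library): a typical test
 * creates a small VM first, then adds vCPUs and extra memory to it. The
 * mode and page count below are arbitrary.
 *
 *	struct kvm_vm *vm;
 *
 *	vm = vm_create(VM_MODE_P52V48_4K, 512, O_RDWR);
 */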

/*
 * VM Restart
 *
 * Input Args:
 *   vm - VM that has been released before
 *   perm - permission
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
	struct userspace_mem_region *region;

	vm_open(vmp, perm, vmp->type);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	for (region = vmp->userspace_mem_region_head; region;
	     region = region->next) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    "  rc: %i errno: %i\n"
			    "  slot: %u flags: 0x%x\n"
			    "  guest_phys_addr: 0x%lx size: 0x%lx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
	int ret;

	ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}

void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
					    .first_page = first_page,
					    .num_pages = num_pages };
	int ret;

	ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}
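
/*
 * Example (illustrative sketch, not part of the library): a dirty-logging
 * test harvests and then clears a slot's dirty bitmap on each iteration.
 * The slot number, page count, and bitmap_alloc() helper are hypothetical.
 *
 *	unsigned long *bmap = bitmap_alloc(TEST_PAGES);
 *
 *	kvm_vm_get_dirty_log(vm, TEST_SLOT, bmap);
 *	kvm_vm_clear_dirty_log(vm, TEST_SLOT, bmap, 0, TEST_PAGES);
 */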

/*
 * Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Searches for a region with any physical memory that overlaps with
 * any portion of the guest physical addresses from start to end
 * inclusive. If multiple overlapping regions exist, a pointer to any
 * of the regions is returned. NULL is returned only when no overlapping
 * region exists.
 */
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct userspace_mem_region *region;

	for (region = vm->userspace_mem_region_head; region;
	     region = region->next) {
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;
	}

	return NULL;
}

/*
 * KVM Userspace Memory Region Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   start - Starting VM physical address
 *   end - Ending VM physical address, inclusive.
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to overlapping region, NULL if no such region.
 *
 * Public interface to userspace_mem_region_find. Allows tests to look up
 * the memslot data structure for a given range of guest physical memory.
 */
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, start, end);
	if (!region)
		return NULL;

	return &region->region;
}

/*
 * VCPU Find
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to VCPU structure
 *
 * Locates a vcpu structure that describes the VCPU specified by vcpuid
 * and returns a pointer to it. Returns NULL if the VM doesn't contain
 * a VCPU with the specified vcpuid.
 */
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpup;

	for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
		if (vcpup->id == vcpuid)
			return vcpup;
	}

	return NULL;
}

/*
 * VM VCPU Remove
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None, TEST_ASSERT failures for all error conditions
 *
 * Within the VM specified by vm, removes the VCPU given by vcpuid.
 */
static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	ret = munmap(vcpu->state, sizeof(*vcpu->state));
	TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
		    "errno: %i", ret, errno);
	ret = close(vcpu->fd);
	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
		    "errno: %i", ret, errno);

	if (vcpu->next)
		vcpu->next->prev = vcpu->prev;
	if (vcpu->prev)
		vcpu->prev->next = vcpu->next;
	else
		vm->vcpu_head = vcpu->next;
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	int ret;

	while (vmp->vcpu_head)
		vm_vcpu_rm(vmp, vmp->vcpu_head->id);

	ret = close(vmp->fd);
	TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
		    "  vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
		    "  vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}

/*
 * Destroys and frees the VM pointed to by vmp.
 */
void kvm_vm_free(struct kvm_vm *vmp)
{
	int ret;

	if (vmp == NULL)
		return;

	/* Free userspace_mem_regions. */
	while (vmp->userspace_mem_region_head) {
		struct userspace_mem_region *region
			= vmp->userspace_mem_region_head;

		region->region.memory_size = 0;
		ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
			&region->region);
		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
			    "rc: %i errno: %i", ret, errno);

		vmp->userspace_mem_region_head = region->next;
		sparsebit_free(&region->unused_phy_pages);
		ret = munmap(region->mmap_start, region->mmap_size);
		TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
			    ret, errno);

		free(region);
	}

	/* Free sparsebit arrays. */
	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	/* Free the structure describing the VM. */
	free(vmp);
}

/*
 * Memory Compare, host virtual to guest virtual
 *
 * Input Args:
 *   hva - Starting host virtual address
 *   vm - Virtual Machine
 *   gva - Starting guest virtual address
 *   len - number of bytes to compare
 *
 * Output Args: None
 *
 * Return:
 *   Returns 0 if the bytes starting at hva for a length of len
 *   are equal to the guest bytes starting at gva, otherwise a value
 *   in the same format as memcmp (i.e. negative if the first differing
 *   byte of hva is less than the corresponding byte of gva, positive
 *   otherwise).
 *
 * Compares the bytes starting at the host virtual address hva, for
 * a length of len, to the guest bytes starting at the guest virtual
 * address given by gva.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a batch of bytes until either a match is found
	 * or all the bytes have been compared.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Determine host address for guest virtual address
		 * at offset.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Determine amount to compare on this pass.
		 * Don't allow the comparison to cross a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/*
		 * Perform the comparison. If there is a difference
		 * return that result to the caller, otherwise keep
		 * looking for a mismatch.
		 */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/*
	 * No mismatch found. Let the caller know the two memory
	 * areas are equal.
	 */
	return 0;
}

/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr. The region is created with a KVM region slot
 * given by slot, which must be unique and not already in use. The
 * region is created with the flags given by flags.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		"  guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		"  guest_paddr: 0x%lx npages: 0x%lx\n"
		"  vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	/*
	 * Confirm a mem region with an overlapping address doesn't
	 * already exist.
	 */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_ASSERT(false, "overlapping userspace_mem_region already "
			"exists\n"
			"  requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			"  existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Confirm no region with the requested slot already exists. */
	for (region = vm->userspace_mem_region_head; region;
	     region = region->next) {
		if (region->region.slot == slot)
			break;
	}
	if (region != NULL)
		TEST_ASSERT(false, "A mem region with the requested slot "
			"already exists.\n"
			"  requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			"  existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

	/* Enough memory to align up to a huge page if needed. */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		region->mmap_size += huge_page_size;
	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS
				  | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
				  -1, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    "mmap failed, mmap_start: %p errno: %i",
		    region->mmap_start, errno);

	/* Align THP allocation up to the start of a huge page. */
	region->host_mem = align(region->mmap_start,
				 src_type == VM_MEM_SRC_ANONYMOUS_THP ? huge_page_size : 1);

	/* As needed perform madvise. */
	if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
		ret = madvise(region->host_mem, npages * vm->page_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed,\n"
			    "  addr: %p\n"
			    "  length: 0x%lx\n"
			    "  src_type: %x",
			    region->host_mem, npages * vm->page_size, src_type);
	}

	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i\n"
		"  slot: %u flags: 0x%x\n"
		"  guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	/* Add to linked-list of memory regions. */
	if (vm->userspace_mem_region_head)
		vm->userspace_mem_region_head->prev = region;
	region->next = vm->userspace_mem_region_head;
	vm->userspace_mem_region_head = region;
}
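
/*
 * Example (illustrative sketch, not part of the library): adding a second,
 * dirty-log-capable memslot behind the default one. The slot number, base
 * address and page count are hypothetical.
 *
 *	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
 *				    0x10000000, 1, 16,
 *				    KVM_MEM_LOG_DIRTY_PAGES);
 */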

/*
 * Memslot to region
 *
 * Input Args:
 *   vm - Virtual Machine
 *   memslot - KVM memory slot ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to the memory region structure that describes the KVM memory
 *   slot ID given by memslot. A TEST_ASSERT failure occurs when no region
 *   with the given slot ID exists.
 */
static struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	for (region = vm->userspace_mem_region_head; region;
	     region = region->next) {
		if (region->region.slot == memslot)
			break;
	}
	if (region == NULL) {
		fprintf(stderr, "No mem region with the requested slot found,\n"
			"  requested slot: %u\n", memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		TEST_ASSERT(false, "Mem region not found");
	}

	return region;
}

/*
 * VM Memory Region Flags Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   slot - KVM memory slot ID
 *   flags - Flags value (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the flags of the memory region specified by the value of slot
 * to the values given by flags.
 */
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		"  rc: %i errno: %i slot: %u flags: 0x%x",
		ret, errno, slot, flags);
}

/*
 * VCPU mmap Size
 *
 * Input Args: None
 *
 * Output Args: None
 *
 * Return:
 *   Size of VCPU state
 *
 * Returns the size of the structure pointed to by the return value
 * of vcpu_state().
 */
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (dev_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		"%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
		__func__, ret, errno);

	close(dev_fd);

	return ret;
}

/*
 * VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *   gdt_memslot - Memory region slot for data pages
 *
 * Output Args: None
 *
 * Return: None
 *
 * Creates and adds to the VM specified by vm a virtual CPU with
 * the ID given by vcpuid.
 */
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid, int pgd_memslot,
		 int gdt_memslot)
{
	struct vcpu *vcpu;

	/* Confirm a vcpu with the specified id doesn't already exist. */
	vcpu = vcpu_find(vm, vcpuid);
	if (vcpu != NULL)
		TEST_ASSERT(false, "vcpu with the specified id "
			"already exists,\n"
			"  requested vcpuid: %u\n"
			"  existing vcpuid: %u state: %p",
			vcpuid, vcpu->id, vcpu->state);

	/* Allocate and initialize new vcpu structure. */
	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
	vcpu->id = vcpuid;
	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
		vcpu->fd, errno);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
		"smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		vcpu_mmap_sz(), sizeof(*vcpu->state));
	vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
		"vcpu id: %u errno: %i", vcpuid, errno);

	/* Add to linked-list of VCPUs. */
	if (vm->vcpu_head)
		vm->vcpu_head->prev = vcpu;
	vcpu->next = vm->vcpu_head;
	vm->vcpu_head = vcpu;

	vcpu_setup(vm, vcpuid, pgd_memslot, gdt_memslot);
}
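
/*
 * Example (illustrative sketch, not part of the library): pairing a new
 * vCPU with a guest entry point. VCPU_ID and guest_code are hypothetical,
 * and setting the program counter this way is arch-specific (x86 shown).
 *
 *	vm_vcpu_add(vm, VCPU_ID, 0, 0);
 *
 *	struct kvm_regs regs;
 *	vcpu_regs_get(vm, VCPU_ID, &regs);
 *	regs.rip = (unsigned long) guest_code;
 *	vcpu_regs_set(vm, VCPU_ID, &regs);
 */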

/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min, with at least
 *   sz unused bytes. TEST_ASSERT failure if no area of at least
 *   sz bytes is available.
 *
 * Within the VM specified by vm, locates the lowest starting virtual
 * address >= vaddr_min, that is at least sz bytes long and within
 * the VM's valid, unmapped virtual address range.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* Determine lowest permitted virtual page index. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Loop over section with enough valid virtual page indexes. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Are there enough unused virtual pages available at
		 * the currently proposed starting virtual page index?
		 * If not, adjust the proposed starting index to the
		 * next possible.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/*
		 * If needed, adjust the proposed starting virtual address,
		 * to the next range of valid virtual addresses.
		 */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_ASSERT(false, "No vaddr of specified pages available, "
		"pages: 0x%lx", pages);

	/* NOT REACHED */
	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		"  pgidx_start: 0x%lx\n"
		"  pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}

/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *   data_memslot - Memory region slot for data pages
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the
 * VM specified by vm. The allocated bytes are mapped to a virtual address
 * >= the address given by vaddr_min. Note that each allocation uses a
 * whole number of pages, and the backing physical pages are drawn from
 * the memslot given by data_memslot; they need not be contiguous.
 */
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			  uint32_t data_memslot, uint32_t pgd_memslot)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm, pgd_memslot);

	/*
	 * Find an unused range of virtual page addresses of at least
	 * pages in length.
	 */
	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	/* Map the virtual pages. */
	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size) {
		vm_paddr_t paddr;

		paddr = vm_phy_page_alloc(vm,
				KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);

		virt_pg_map(vm, vaddr, paddr, pgd_memslot);

		sparsebit_set(vm->vpages_mapped,
			vaddr >> vm->page_shift);
	}

	return vaddr_start;
}
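
/*
 * Example (illustrative sketch, not part of the library): carving out one
 * 4K page of guest-visible scratch space and zeroing it from the host. The
 * minimum address and memslot 0 are hypothetical choices.
 *
 *	vm_vaddr_t gva = vm_vaddr_alloc(vm, 0x1000, 0x10000, 0, 0);
 *
 *	memset(addr_gva2hva(vm, gva), 0, 0x1000);
 */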

/*
 * Map a range of VM virtual addresses to the VM's physical addresses
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vaddr - Virtual address to map
 *   paddr - VM Physical Address
 *   size - The size of the range to map
 *   pgd_memslot - Memory region slot for new virtual translation tables
 *
 * Output Args: None
 *
 * Return: None
 *
 * Within the VM given by vm, creates a virtual translation for the
 * page range starting at vaddr to the page range starting at paddr.
 */
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      size_t size, uint32_t pgd_memslot)
{
	size_t page_size = vm->page_size;
	size_t npages = size / page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
		vaddr += page_size;
		paddr += page_size;
	}
}
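
/*
 * Example (illustrative sketch, not part of the library): identity-mapping
 * the 16 pages placed at guest physical 0x10000000 by the
 * vm_userspace_mem_region_add() sketch above. Addresses are hypothetical.
 *
 *	virt_map(vm, 0x10000000, 0x10000000, 16 * 0x1000, 0);
 */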

/*
 * Address VM Physical to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gpa - VM physical address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Locates the memory region containing the VM physical address given
 * by gpa, within the VM given by vm. When found, the host virtual
 * address providing the memory to the vm physical address is returned.
 * A TEST_ASSERT failure occurs if no region containing gpa exists.
 */
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	for (region = vm->userspace_mem_region_head; region;
	     region = region->next) {
		if ((gpa >= region->region.guest_phys_addr)
			&& (gpa <= (region->region.guest_phys_addr
				+ region->region.memory_size - 1)))
			return (void *) ((uintptr_t) region->host_mem
				+ (gpa - region->region.guest_phys_addr));
	}

	TEST_ASSERT(false, "No vm physical memory at 0x%lx", gpa);
	return NULL;
}

/*
 * Address Host Virtual to VM Physical
 *
 * Input Args:
 *   vm - Virtual Machine
 *   hva - Host virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent VM physical address
 *
 * Locates the memory region containing the host virtual address given
 * by hva, within the VM given by vm. When found, the equivalent
 * VM physical address is returned. A TEST_ASSERT failure occurs if no
 * region containing hva exists.
 */
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct userspace_mem_region *region;

	for (region = vm->userspace_mem_region_head; region;
	     region = region->next) {
		if ((hva >= region->host_mem)
			&& (hva <= (region->host_mem
				+ region->region.memory_size - 1)))
			return (vm_paddr_t) ((uintptr_t)
				region->region.guest_phys_addr
				+ (hva - (uintptr_t) region->host_mem));
	}

	TEST_ASSERT(false, "No mapping to a guest physical address, "
		"hva: %p", hva);
	return -1;
}

/*
 * VM Create IRQ Chip
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: None
 *
 * Creates an interrupt controller chip for the VM specified by vm.
 */
void vm_create_irqchip(struct kvm_vm *vm)
{
	int ret;

	ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
	TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
		"rc: %i errno: %i", ret, errno);

	vm->has_irqchip = true;
}

/*
 * VM VCPU State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to structure that describes the state of the VCPU.
 *
 * Locates and returns a pointer to a structure that describes the
 * state of the VCPU with the given vcpuid.
 */
struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->state;
}

/*
 * VM VCPU Run
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Switches to executing the code of the VCPU given by vcpuid, within the
 * VM given by vm. A TEST_ASSERT failure occurs if KVM_RUN fails.
 */
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int ret = _vcpu_run(vm, vcpuid);

	TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	do {
		rc = ioctl(vcpu->fd, KVM_RUN, NULL);
	} while (rc == -1 && errno == EINTR);

	return rc;
}
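
/*
 * Example (illustrative sketch, not part of the library): a common test
 * main loop runs the vCPU and dispatches on the exit reason until the
 * guest signals completion. VCPU_ID and the guest_is_done() helper are
 * hypothetical.
 *
 *	struct kvm_run *run = vcpu_state(vm, VCPU_ID);
 *
 *	for (;;) {
 *		vcpu_run(vm, VCPU_ID);
 *		TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
 *			    "Unexpected exit reason: %u (%s)",
 *			    run->exit_reason,
 *			    exit_reason_str(run->exit_reason));
 *		if (guest_is_done(run))
 *			break;
 *	}
 */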

void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	vcpu->state->immediate_exit = 1;
	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
	vcpu->state->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

/*
 * VM VCPU Set MP State
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   mp_state - mp_state to be set
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the MP state of the VCPU given by vcpuid, to the state given
 * by mp_state.
 */
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
		       struct kvm_mp_state *mp_state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
	TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

/*
 * VM VCPU Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   regs - current state of VCPU regs
 *
 * Return: None
 *
 * Obtains the current register state of the VCPU specified by vcpuid
 * and stores it at the location given by regs.
 */
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
		ret, errno);
}

/*
 * VM VCPU Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   regs - Values to set VCPU regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the regs of the VCPU specified by vcpuid to the values
 * given by regs.
 */
void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
		ret, errno);
}

void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS failed, rc: %i errno: %i",
		ret, errno);
}

void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS failed, rc: %i errno: %i",
		ret, errno);
}

/*
 * VM VCPU System Regs Get
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args:
 *   sregs - current state of VCPU system regs
 *
 * Return: None
 *
 * Obtains the current system register state of the VCPU specified by
 * vcpuid and stores it at the location given by sregs.
 */
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
	TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
		ret, errno);
}

/*
 * VM VCPU System Regs Set
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   sregs - Values to set VCPU system regs to
 *
 * Output Args: None
 *
 * Return: None
 *
 * Sets the system regs of the VCPU specified by vcpuid to the values
 * given by sregs.
 */
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);

	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}

int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}

/*
 * VCPU Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VCPU fd. A TEST_ASSERT failure occurs
 * if the ioctl returns non-zero.
 */
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
	TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, cmd, arg);

	return ret;
}

/*
 * VM Ioctl
 *
 * Input Args:
 *   vm - Virtual Machine
 *   cmd - Ioctl number
 *   arg - Argument to pass to the ioctl
 *
 * Return: None
 *
 * Issues an arbitrary ioctl on a VM fd. A TEST_ASSERT failure occurs
 * if the ioctl returns non-zero.
 */
void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = ioctl(vm->fd, cmd, arg);
	TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, ret, errno, strerror(errno));
}

/*
 * VM Dump
 *
 * Input Args:
 *   vm - Virtual Machine
 *   indent - Left margin indent amount
 *
 * Output Args:
 *   stream - Output FILE stream
 *
 * Return: None
 *
 * Dumps the current state of the VM given by vm, to the FILE stream
 * given by stream.
 */
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	for (region = vm->userspace_mem_region_head; region;
	     region = region->next) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}

/* Known KVM exit reasons */
static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};

/*
 * Exit Reason String
 *
 * Input Args:
 *   exit_reason - Exit reason
 *
 * Output Args: None
 *
 * Return:
 *   Constant string pointer describing the exit reason.
 *
 * Locates and returns a constant string that describes the KVM exit
 * reason given by exit_reason. If no such string is found, a constant
 * string of "Unknown" is returned.
 */
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}

/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate pages from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min. If found, the pages are marked as in use
 * and their base address is returned. A TEST_ASSERT failure occurs for
 * invalid arguments. The function dumps the VM state and aborts if no
 * contiguous range of num free pages exists.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		"  paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
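
/*
 * Example (illustrative sketch, not part of the library): reserving four
 * contiguous guest physical pages from the default memslot for a guest
 * data structure. The minimum address and slot 0 are hypothetical.
 *
 *	vm_paddr_t gpa = vm_phy_pages_alloc(vm, 4, 0x2000, 0);
 */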

/*
 * Address Guest Virtual to Host Virtual
 *
 * Input Args:
 *   vm - Virtual Machine
 *   gva - VM virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Equivalent host virtual address
 *
 * Translates gva to a guest physical address via the VM's page tables,
 * then maps that physical address to the host virtual address backing it.
 */
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
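
/*
 * Example (illustrative sketch, not part of the library): host code often
 * inspects a guest variable through this translation. It assumes the guest
 * image is mapped at the same virtual addresses the host binary uses, and
 * the symbol "guest_counter" is hypothetical.
 *
 *	uint64_t *counter = addr_gva2hva(vm, (vm_vaddr_t) &guest_counter);
 *	printf("guest counter: %lu\n", *counter);
 */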
1555