1
2
3
4
5
6
7
8#include "test_util.h"
9#include "kvm_util.h"
10#include "kvm_util_internal.h"
11
12#include <assert.h>
13#include <sys/mman.h>
14#include <sys/types.h>
15#include <sys/stat.h>
16#include <linux/kernel.h>
17
18#define KVM_UTIL_PGS_PER_HUGEPG 512
19#define KVM_UTIL_MIN_PFN 2
20
21
static void *align(void *x, size_t size)
{
	/*
	 * Round pointer x up to the next multiple of size, which must be
	 * a power of 2.
	 */
	size_t mask;

	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		"size not a power of 2: %lu", size);

	mask = size - 1;
	return (void *) (((size_t) x + mask) & ~mask);
}
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46int kvm_check_cap(long cap)
47{
48 int ret;
49 int kvm_fd;
50
51 kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
52 if (kvm_fd < 0)
53 exit(KSFT_SKIP);
54
55 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
56 TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
57 " rc: %i errno: %i", ret, errno);
58
59 close(kvm_fd);
60
61 return ret;
62}
63
64
65
66
67
68
69
70
71
72
73
74
75
76int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
77{
78 int ret;
79
80 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
81 TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
82 " rc: %i errno: %i", ret, errno);
83
84 return ret;
85}
86
87static void vm_open(struct kvm_vm *vm, int perm, unsigned long type)
88{
89 vm->kvm_fd = open(KVM_DEV_PATH, perm);
90 if (vm->kvm_fd < 0)
91 exit(KSFT_SKIP);
92
93 if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
94 fprintf(stderr, "immediate_exit not available, skipping test\n");
95 exit(KSFT_SKIP);
96 }
97
98 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, type);
99 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
100 "rc: %i errno: %i", vm->fd, errno);
101}
102
/*
 * Printable descriptions of the guest addressing modes, indexed by
 * the vm_guest_mode enumeration.
 */
const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
};
/* Catch additions to vm_guest_mode that lack a matching string. */
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	"Missing new mode strings?");
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
/*
 * Create a VM.
 *
 * mode      - guest addressing mode (drives page-table geometry below)
 * phy_pages - physical pages to add to memslot 0 (0 = add no region)
 * perm      - open(2) flags used when opening /dev/kvm
 * type      - KVM_CREATE_VM type argument
 *
 * Returns the new, fully initialized kvm_vm; asserts on any failure.
 */
struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages,
	int perm, unsigned long type)
{
	struct kvm_vm *vm;

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	vm->mode = mode;
	vm->type = type;
	vm_open(vm, perm, type);

	/* Setup mode specific traits: page-table depth and page size. */
	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		vm->pa_bits = 52;
		vm->va_bits = 48;
		vm->page_size = 0x1000;
		vm->page_shift = 12;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		vm->pa_bits = 52;
		vm->va_bits = 48;
		vm->page_size = 0x10000;
		vm->page_shift = 16;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		vm->pa_bits = 48;
		vm->va_bits = 48;
		vm->page_size = 0x1000;
		vm->page_shift = 12;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		vm->pa_bits = 48;
		vm->va_bits = 48;
		vm->page_size = 0x10000;
		vm->page_shift = 16;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		vm->pa_bits = 40;
		vm->va_bits = 48;
		vm->page_size = 0x1000;
		vm->page_shift = 12;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		vm->pa_bits = 40;
		vm->va_bits = 48;
		vm->page_size = 0x10000;
		vm->page_shift = 16;
		break;
	default:
		TEST_ASSERT(false, "Unknown guest mode, mode: 0x%x", mode);
	}

	/*
	 * Mark the valid VA pages: the low half [0, 2^(va_bits-1)) and an
	 * equally sized range at the very top of the address space
	 * (canonical-style split, expressed in page indices).
	 */
	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	/* Highest guest frame number addressable with pa_bits. */
	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	/* Allocate page-tracking state; optionally add memslot 0. */
	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
			0, 0, phy_pages, 0);

	return vm;
}
212
213struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
214{
215 return _vm_create(mode, phy_pages, perm, 0);
216}
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231void kvm_vm_restart(struct kvm_vm *vmp, int perm)
232{
233 struct userspace_mem_region *region;
234
235 vm_open(vmp, perm, vmp->type);
236 if (vmp->has_irqchip)
237 vm_create_irqchip(vmp);
238
239 for (region = vmp->userspace_mem_region_head; region;
240 region = region->next) {
241 int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
242 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
243 " rc: %i errno: %i\n"
244 " slot: %u flags: 0x%x\n"
245 " guest_phys_addr: 0x%lx size: 0x%lx",
246 ret, errno, region->region.slot,
247 region->region.flags,
248 region->region.guest_phys_addr,
249 region->region.memory_size);
250 }
251}
252
253void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
254{
255 struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
256 int ret;
257
258 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
259 TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
260 strerror(-ret));
261}
262
263void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
264 uint64_t first_page, uint32_t num_pages)
265{
266 struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
267 .first_page = first_page,
268 .num_pages = num_pages };
269 int ret;
270
271 ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
272 TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
273 strerror(-ret));
274}
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295static struct userspace_mem_region *
296userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
297{
298 struct userspace_mem_region *region;
299
300 for (region = vm->userspace_mem_region_head; region;
301 region = region->next) {
302 uint64_t existing_start = region->region.guest_phys_addr;
303 uint64_t existing_end = region->region.guest_phys_addr
304 + region->region.memory_size - 1;
305 if (start <= existing_end && end >= existing_start)
306 return region;
307 }
308
309 return NULL;
310}
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328struct kvm_userspace_memory_region *
329kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
330 uint64_t end)
331{
332 struct userspace_mem_region *region;
333
334 region = userspace_mem_region_find(vm, start, end);
335 if (!region)
336 return NULL;
337
338 return ®ion->region;
339}
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
358{
359 struct vcpu *vcpup;
360
361 for (vcpup = vm->vcpu_head; vcpup; vcpup = vcpup->next) {
362 if (vcpup->id == vcpuid)
363 return vcpup;
364 }
365
366 return NULL;
367}
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382static void vm_vcpu_rm(struct kvm_vm *vm, uint32_t vcpuid)
383{
384 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
385 int ret;
386
387 ret = munmap(vcpu->state, sizeof(*vcpu->state));
388 TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
389 "errno: %i", ret, errno);
390 close(vcpu->fd);
391 TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
392 "errno: %i", ret, errno);
393
394 if (vcpu->next)
395 vcpu->next->prev = vcpu->prev;
396 if (vcpu->prev)
397 vcpu->prev->next = vcpu->next;
398 else
399 vm->vcpu_head = vcpu->next;
400 free(vcpu);
401}
402
403void kvm_vm_release(struct kvm_vm *vmp)
404{
405 int ret;
406
407 while (vmp->vcpu_head)
408 vm_vcpu_rm(vmp, vmp->vcpu_head->id);
409
410 ret = close(vmp->fd);
411 TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
412 " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
413
414 close(vmp->kvm_fd);
415 TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
416 " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
417}
418
419
420
421
422void kvm_vm_free(struct kvm_vm *vmp)
423{
424 int ret;
425
426 if (vmp == NULL)
427 return;
428
429
430 while (vmp->userspace_mem_region_head) {
431 struct userspace_mem_region *region
432 = vmp->userspace_mem_region_head;
433
434 region->region.memory_size = 0;
435 ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION,
436 ®ion->region);
437 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
438 "rc: %i errno: %i", ret, errno);
439
440 vmp->userspace_mem_region_head = region->next;
441 sparsebit_free(®ion->unused_phy_pages);
442 ret = munmap(region->mmap_start, region->mmap_size);
443 TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i",
444 ret, errno);
445
446 free(region);
447 }
448
449
450 sparsebit_free(&vmp->vpages_valid);
451 sparsebit_free(&vmp->vpages_mapped);
452
453 kvm_vm_release(vmp);
454
455
456 free(vmp);
457}
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
/*
 * Compare 'len' bytes at host virtual address 'hva' against the bytes
 * at guest virtual address 'gva' of VM 'vm'.  Returns a memcmp()-style
 * result: 0 when equal, otherwise the result of the first differing
 * chunk comparison.
 */
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	/*
	 * Compare a chunk at a time.  The guest side is only known to be
	 * host-contiguous within a single guest page, so each chunk is
	 * clamped below to keep both pointers within one page.
	 */
	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;

		/*
		 * Re-translate the guest address every iteration: each
		 * guest page may be backed by a different host page.
		 */
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		/*
		 * Shrink amt so that neither [ptr1, ptr1+amt) nor
		 * [ptr2, ptr2+amt) crosses a page boundary.
		 */
		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		/* First difference decides the overall result. */
		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	/* No mismatch found across the entire range. */
	return 0;
}
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
/*
 * Add a userspace memory region to the VM.
 *
 * src_type    - how the host backing is obtained (anonymous, THP,
 *               hugetlb)
 * guest_paddr - starting guest physical address (page aligned)
 * slot        - KVM memslot number (must be unused)
 * npages      - size of the region in guest pages
 * flags       - KVM_SET_USER_MEMORY_REGION flags
 *
 * mmap()s host memory, applies madvise() hints as needed, registers
 * the region with KVM and links it into vm->userspace_mem_region_head.
 * Asserts on overlap, duplicate slot, or any syscall failure.
 */
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
	size_t alignment;

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		" guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		" guest_paddr: 0x%lx npages: 0x%lx\n"
		" vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	/* Reject a region that overlaps any existing one. */
	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_ASSERT(false, "overlapping userspace_mem_region already "
			"exists\n"
			" requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			" existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Reject a duplicate memslot number. */
	for (region = vm->userspace_mem_region_head; region;
		region = region->next) {
		if (region->region.slot == slot)
			break;
	}
	if (region != NULL)
		TEST_ASSERT(false, "A mem region with the requested slot "
			"already exists.\n"
			" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			" existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	/* Allocate and initialize new mem region structure. */
	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	/* NOTE(review): s390x appears to require 1M host alignment —
	 * confirm against the s390 KVM requirements. */
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	/* THP backing wants the region aligned to the huge-page size. */
	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(huge_page_size, alignment);

	/* Over-allocate so the start can be aligned up afterwards. */
	if (alignment > 1)
		region->mmap_size += alignment;

	region->mmap_start = mmap(NULL, region->mmap_size,
		PROT_READ | PROT_WRITE,
		MAP_PRIVATE | MAP_ANONYMOUS
		| (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
		-1, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		"test_malloc failed, mmap_start: %p errno: %i",
		region->mmap_start, errno);

	/* Align the usable start address within the mapping. */
	region->host_mem = align(region->mmap_start, alignment);

	/* Ask for (or forbid) transparent huge pages as requested. */
	if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
		ret = madvise(region->host_mem, npages * vm->page_size,
			src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed,\n"
			" addr: %p\n"
			" length: 0x%lx\n"
			" src_type: %x",
			region->host_mem, npages * vm->page_size, src_type);
	}

	/* All pages start out unused; register the region with KVM. */
	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		" rc: %i errno: %i\n"
		" slot: %u flags: 0x%x\n"
		" guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	/* Prepend to the VM's region list. */
	if (vm->userspace_mem_region_head)
		vm->userspace_mem_region_head->prev = region;
	region->next = vm->userspace_mem_region_head;
	vm->userspace_mem_region_head = region;
}
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684static struct userspace_mem_region *
685memslot2region(struct kvm_vm *vm, uint32_t memslot)
686{
687 struct userspace_mem_region *region;
688
689 for (region = vm->userspace_mem_region_head; region;
690 region = region->next) {
691 if (region->region.slot == memslot)
692 break;
693 }
694 if (region == NULL) {
695 fprintf(stderr, "No mem region with the requested slot found,\n"
696 " requested slot: %u\n", memslot);
697 fputs("---- vm dump ----\n", stderr);
698 vm_dump(stderr, vm, 2);
699 TEST_ASSERT(false, "Mem region not found");
700 }
701
702 return region;
703}
704
705
706
707
708
709
710
711
712
713
714
715
716
717
718
719void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
720{
721 int ret;
722 struct userspace_mem_region *region;
723
724 region = memslot2region(vm, slot);
725
726 region->region.flags = flags;
727
728 ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, ®ion->region);
729
730 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
731 " rc: %i errno: %i slot: %u flags: 0x%x",
732 ret, errno, slot, flags);
733}
734
735
736
737
738
739
740
741
742
743
744
745
746
747
748static int vcpu_mmap_sz(void)
749{
750 int dev_fd, ret;
751
752 dev_fd = open(KVM_DEV_PATH, O_RDONLY);
753 if (dev_fd < 0)
754 exit(KSFT_SKIP);
755
756 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
757 TEST_ASSERT(ret >= sizeof(struct kvm_run),
758 "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
759 __func__, ret, errno);
760
761 close(dev_fd);
762
763 return ret;
764}
765
766
767
768
769
770
771
772
773
774
775
776
777
778
779
780void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
781{
782 struct vcpu *vcpu;
783
784
785 vcpu = vcpu_find(vm, vcpuid);
786 if (vcpu != NULL)
787 TEST_ASSERT(false, "vcpu with the specified id "
788 "already exists,\n"
789 " requested vcpuid: %u\n"
790 " existing vcpuid: %u state: %p",
791 vcpuid, vcpu->id, vcpu->state);
792
793
794 vcpu = calloc(1, sizeof(*vcpu));
795 TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
796 vcpu->id = vcpuid;
797 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
798 TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
799 vcpu->fd, errno);
800
801 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
802 "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
803 vcpu_mmap_sz(), sizeof(*vcpu->state));
804 vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
805 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
806 TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
807 "vcpu id: %u errno: %i", vcpuid, errno);
808
809
810 if (vm->vcpu_head)
811 vm->vcpu_head->prev = vcpu;
812 vcpu->next = vm->vcpu_head;
813 vm->vcpu_head = vcpu;
814}
815
816
817
818
819
820
821
822
823
824
825
826
827
828
829
830
831
832
833
834
835
/*
 * Locate a gap of 'sz' bytes of unused guest virtual address space at
 * or above vaddr_min.  Every page of the returned range is set in
 * vm->vpages_valid and clear in vm->vpages_mapped.  Asserts (never
 * returning) when no such gap exists.
 */
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
	vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	/* First page index at or above vaddr_min; bail if the
	 * multiplication back to a byte address wrapped. */
	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	/* Move to the first run of 'pages' consecutive valid pages. */
	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		/*
		 * Candidate accepted when none of its pages are already
		 * mapped; otherwise advance past the mapped pages and
		 * re-check validity of the new candidate.  A pgidx_start
		 * of 0 from the sparsebit search means nothing further
		 * was found.
		 */
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		/* The new candidate must again be entirely valid VA. */
		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_ASSERT(false, "No vaddr of specified pages available, "
		"pages: 0x%lx", pages);

	/* NOT REACHED — TEST_ASSERT(false) above terminates. */
	return -1;

va_found:
	/* Sanity-check the invariants before returning. */
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		" pgidx_start: 0x%lx\n"
		" pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		" pgidx_start: 0x%lx\n"
		" pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}
902
903
904
905
906
907
908
909
910
911
912
913
914
915
916
917
918
919
920
921
922
923
924vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
925 uint32_t data_memslot, uint32_t pgd_memslot)
926{
927 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
928
929 virt_pgd_alloc(vm, pgd_memslot);
930
931
932
933
934
935 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
936
937
938 for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
939 pages--, vaddr += vm->page_size) {
940 vm_paddr_t paddr;
941
942 paddr = vm_phy_page_alloc(vm,
943 KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);
944
945 virt_pg_map(vm, vaddr, paddr, pgd_memslot);
946
947 sparsebit_set(vm->vpages_mapped,
948 vaddr >> vm->page_shift);
949 }
950
951 return vaddr_start;
952}
953
954
955
956
957
958
959
960
961
962
963
964
965
966
967
968
969
970
971void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
972 size_t size, uint32_t pgd_memslot)
973{
974 size_t page_size = vm->page_size;
975 size_t npages = size / page_size;
976
977 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
978 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
979
980 while (npages--) {
981 virt_pg_map(vm, vaddr, paddr, pgd_memslot);
982 vaddr += page_size;
983 paddr += page_size;
984 }
985}
986
987
988
989
990
991
992
993
994
995
996
997
998
999
1000
1001
1002
1003
1004void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1005{
1006 struct userspace_mem_region *region;
1007 for (region = vm->userspace_mem_region_head; region;
1008 region = region->next) {
1009 if ((gpa >= region->region.guest_phys_addr)
1010 && (gpa <= (region->region.guest_phys_addr
1011 + region->region.memory_size - 1)))
1012 return (void *) ((uintptr_t) region->host_mem
1013 + (gpa - region->region.guest_phys_addr));
1014 }
1015
1016 TEST_ASSERT(false, "No vm physical memory at 0x%lx", gpa);
1017 return NULL;
1018}
1019
1020
1021
1022
1023
1024
1025
1026
1027
1028
1029
1030
1031
1032
1033
1034
1035
1036
1037vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1038{
1039 struct userspace_mem_region *region;
1040 for (region = vm->userspace_mem_region_head; region;
1041 region = region->next) {
1042 if ((hva >= region->host_mem)
1043 && (hva <= (region->host_mem
1044 + region->region.memory_size - 1)))
1045 return (vm_paddr_t) ((uintptr_t)
1046 region->region.guest_phys_addr
1047 + (hva - (uintptr_t) region->host_mem));
1048 }
1049
1050 TEST_ASSERT(false, "No mapping to a guest physical address, "
1051 "hva: %p", hva);
1052 return -1;
1053}
1054
1055
1056
1057
1058
1059
1060
1061
1062
1063
1064
1065
1066
1067void vm_create_irqchip(struct kvm_vm *vm)
1068{
1069 int ret;
1070
1071 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
1072 TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
1073 "rc: %i errno: %i", ret, errno);
1074
1075 vm->has_irqchip = true;
1076}
1077
1078
1079
1080
1081
1082
1083
1084
1085
1086
1087
1088
1089
1090
1091
1092
1093struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
1094{
1095 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1096 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1097
1098 return vcpu->state;
1099}
1100
1101
1102
1103
1104
1105
1106
1107
1108
1109
1110
1111
1112
1113
1114
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	/* Run the VCPU, asserting that KVM_RUN succeeds. */
	int rc = _vcpu_run(vm, vcpuid);

	TEST_ASSERT(rc == 0, "KVM_RUN IOCTL failed, "
		"rc: %i errno: %i", rc, errno);
}
1121
1122int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1123{
1124 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1125 int rc;
1126
1127 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1128 do {
1129 rc = ioctl(vcpu->fd, KVM_RUN, NULL);
1130 } while (rc == -1 && errno == EINTR);
1131 return rc;
1132}
1133
1134void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
1135{
1136 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1137 int ret;
1138
1139 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1140
1141 vcpu->state->immediate_exit = 1;
1142 ret = ioctl(vcpu->fd, KVM_RUN, NULL);
1143 vcpu->state->immediate_exit = 0;
1144
1145 TEST_ASSERT(ret == -1 && errno == EINTR,
1146 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1147 ret, errno);
1148}
1149
1150
1151
1152
1153
1154
1155
1156
1157
1158
1159
1160
1161
1162
1163
1164
1165void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1166 struct kvm_mp_state *mp_state)
1167{
1168 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1169 int ret;
1170
1171 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1172
1173 ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
1174 TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
1175 "rc: %i errno: %i", ret, errno);
1176}
1177
1178
1179
1180
1181
1182
1183
1184
1185
1186
1187
1188
1189
1190
1191
1192
1193void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1194{
1195 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1196 int ret;
1197
1198 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1199
1200 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
1201 TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
1202 ret, errno);
1203}
1204
1205
1206
1207
1208
1209
1210
1211
1212
1213
1214
1215
1216
1217
1218
1219
1220void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1221{
1222 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1223 int ret;
1224
1225 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1226
1227 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
1228 TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
1229 ret, errno);
1230}
1231
1232#ifdef __KVM_HAVE_VCPU_EVENTS
1233void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
1234 struct kvm_vcpu_events *events)
1235{
1236 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1237 int ret;
1238
1239 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1240
1241 ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
1242 TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
1243 ret, errno);
1244}
1245
1246void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
1247 struct kvm_vcpu_events *events)
1248{
1249 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1250 int ret;
1251
1252 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1253
1254 ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
1255 TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
1256 ret, errno);
1257}
1258#endif
1259
1260#ifdef __x86_64__
1261void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
1262 struct kvm_nested_state *state)
1263{
1264 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1265 int ret;
1266
1267 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1268
1269 ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
1270 TEST_ASSERT(ret == 0,
1271 "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
1272 ret, errno);
1273}
1274
1275int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
1276 struct kvm_nested_state *state, bool ignore_error)
1277{
1278 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1279 int ret;
1280
1281 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1282
1283 ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
1284 if (!ignore_error) {
1285 TEST_ASSERT(ret == 0,
1286 "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
1287 ret, errno);
1288 }
1289
1290 return ret;
1291}
1292#endif
1293
1294
1295
1296
1297
1298
1299
1300
1301
1302
1303
1304
1305
1306
1307
1308
1309void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1310{
1311 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1312 int ret;
1313
1314 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1315
1316 ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
1317 TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
1318 ret, errno);
1319}
1320
1321
1322
1323
1324
1325
1326
1327
1328
1329
1330
1331
1332
1333
1334
1335
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	/* Set the VCPU's special registers, asserting success. */
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
	/*
	 * Fix: the error message said "KVM_RUN IOCTL failed" (copy-paste
	 * from vcpu_run) although the failing call is KVM_SET_SREGS.
	 */
	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		"rc: %i errno: %i", ret, errno);
}
1342
1343int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1344{
1345 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1346
1347 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1348
1349 return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
1350}
1351
1352
1353
1354
1355
1356
1357
1358
1359
1360
1361
1362
1363
1364
void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
	unsigned long cmd, void *arg)
{
	/* Issue an arbitrary VCPU ioctl, asserting that it succeeds. */
	int rc = _vcpu_ioctl(vm, vcpuid, cmd, arg);

	TEST_ASSERT(rc == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		cmd, rc, errno, strerror(errno));
}
1374
1375int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
1376 unsigned long cmd, void *arg)
1377{
1378 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1379 int ret;
1380
1381 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1382
1383 ret = ioctl(vcpu->fd, cmd, arg);
1384
1385 return ret;
1386}
1387
1388
1389
1390
1391
1392
1393
1394
1395
1396
1397
1398
1399
1400void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
1401{
1402 int ret;
1403
1404 ret = ioctl(vm->fd, cmd, arg);
1405 TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
1406 cmd, ret, errno, strerror(errno));
1407}
1408
1409
1410
1411
1412
1413
1414
1415
1416
1417
1418
1419
1420
1421
1422
1423
1424void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
1425{
1426 struct userspace_mem_region *region;
1427 struct vcpu *vcpu;
1428
1429 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
1430 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
1431 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
1432 fprintf(stream, "%*sMem Regions:\n", indent, "");
1433 for (region = vm->userspace_mem_region_head; region;
1434 region = region->next) {
1435 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
1436 "host_virt: %p\n", indent + 2, "",
1437 (uint64_t) region->region.guest_phys_addr,
1438 (uint64_t) region->region.memory_size,
1439 region->host_mem);
1440 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
1441 sparsebit_dump(stream, region->unused_phy_pages, 0);
1442 }
1443 fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
1444 sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
1445 fprintf(stream, "%*spgd_created: %u\n", indent, "",
1446 vm->pgd_created);
1447 if (vm->pgd_created) {
1448 fprintf(stream, "%*sVirtual Translation Tables:\n",
1449 indent + 2, "");
1450 virt_dump(stream, vm, indent + 4);
1451 }
1452 fprintf(stream, "%*sVCPUs:\n", indent, "");
1453 for (vcpu = vm->vcpu_head; vcpu; vcpu = vcpu->next)
1454 vcpu_dump(stream, vm, vcpu->id, indent + 2);
1455}
1456
1457
/*
 * Known KVM exit reasons paired with printable names, consumed by
 * exit_reason_str() below.
 */
static struct exit_reason {
	unsigned int reason;	/* KVM_EXIT_* value */
	const char *name;	/* name without the KVM_EXIT_ prefix */
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
1486
1487
1488
1489
1490
1491
1492
1493
1494
1495
1496
1497
1498
1499
1500
1501
1502const char *exit_reason_str(unsigned int exit_reason)
1503{
1504 unsigned int n1;
1505
1506 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
1507 if (exit_reason == exit_reasons_known[n1].reason)
1508 return exit_reasons_known[n1].name;
1509 }
1510
1511 return "Unknown";
1512}
1513
1514
1515
1516
1517
1518
1519
1520
1521
1522
1523
1524
1525
1526
1527
1528
1529
1530
1531
1532
/*
 * Allocate 'num' consecutive guest physical pages at or above
 * paddr_min from memslot 'memslot', marking them as used.  Returns the
 * base guest physical address; dumps the VM and aborts when no run of
 * 'num' unused pages is available.
 */
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
	vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		" paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	/*
	 * Scan for a run of 'num' unused pages: on the first used page,
	 * restart the run at the next unused page.  A pg of 0 from
	 * sparsebit_next_set() indicates no further unused page exists
	 * (assumed sparsebit convention — confirm against sparsebit.h).
	 */
	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	/* Claim the pages by clearing their unused bits. */
	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}
1572
1573vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
1574 uint32_t memslot)
1575{
1576 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
1577}
1578
1579
1580
1581
1582
1583
1584
1585
1586
1587
1588
1589
1590
1591void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
1592{
1593 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
1594}
1595
1596
1597
1598
1599
1600
1601
1602
1603
1604
1605
1606
1607
1608bool vm_is_unrestricted_guest(struct kvm_vm *vm)
1609{
1610 char val = 'N';
1611 size_t count;
1612 FILE *f;
1613
1614 if (vm == NULL) {
1615
1616 f = fopen(KVM_DEV_PATH, "r");
1617 TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
1618 errno);
1619 fclose(f);
1620 }
1621
1622 f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
1623 if (f) {
1624 count = fread(&val, sizeof(char), 1, f);
1625 TEST_ASSERT(count == 1, "Unable to read from param file.");
1626 fclose(f);
1627 }
1628
1629 return val == 'Y';
1630}
1631