#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "processor.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <linux/kernel.h>

#define KVM_UTIL_PGS_PER_HUGEPG 512
#define KVM_UTIL_MIN_PFN 2

static void *align(void *x, size_t size)
{
	size_t mask = size - 1;
	TEST_ASSERT(size != 0 && !(size & (size - 1)),
		    "size not a power of 2: %lu", size);
	return (void *) (((size_t) x + mask) & ~mask);
}
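
/*
 * Capability check
 *
 * Returns the value reported by the KVM_CHECK_EXTENSION ioctl for @cap,
 * or skips the test (KSFT_SKIP) if /dev/kvm cannot be opened.
 */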
int kvm_check_cap(long cap)
{
	int ret;
	int kvm_fd;

	kvm_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (kvm_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
	TEST_ASSERT(ret != -1, "KVM_CHECK_EXTENSION IOCTL failed,\n"
		    " rc: %i errno: %i", ret, errno);

	close(kvm_fd);

	return ret;
}
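
/*
 * VM Enable Capability
 *
 * Enables the capability described by @cap on the VM via the KVM_ENABLE_CAP
 * ioctl and asserts that the ioctl succeeds.
 */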
int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
{
	int ret;

	ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
	TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
		    " rc: %i errno: %i", ret, errno);

	return ret;
}

static void vm_open(struct kvm_vm *vm, int perm)
{
	vm->kvm_fd = open(KVM_DEV_PATH, perm);
	if (vm->kvm_fd < 0)
		exit(KSFT_SKIP);

	if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
		print_skip("immediate_exit not available");
		exit(KSFT_SKIP);
	}

	vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
	TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
		    "rc: %i errno: %i", vm->fd, errno);
}

const char * const vm_guest_mode_string[] = {
	"PA-bits:52, VA-bits:48, 4K pages",
	"PA-bits:52, VA-bits:48, 64K pages",
	"PA-bits:48, VA-bits:48, 4K pages",
	"PA-bits:48, VA-bits:48, 64K pages",
	"PA-bits:40, VA-bits:48, 4K pages",
	"PA-bits:40, VA-bits:48, 64K pages",
	"PA-bits:ANY, VA-bits:48, 4K pages",
};
_Static_assert(sizeof(vm_guest_mode_string)/sizeof(char *) == NUM_VM_MODES,
	       "Missing new mode strings?");

struct vm_guest_mode_params {
	unsigned int pa_bits;
	unsigned int va_bits;
	unsigned int page_size;
	unsigned int page_shift;
};

static const struct vm_guest_mode_params vm_guest_mode_params[] = {
	{ 52, 48, 0x1000, 12 },
	{ 52, 48, 0x10000, 16 },
	{ 48, 48, 0x1000, 12 },
	{ 48, 48, 0x10000, 16 },
	{ 40, 48, 0x1000, 12 },
	{ 40, 48, 0x10000, 16 },
	{ 0, 0, 0x1000, 12 },
};
_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
	       "Missing new mode params?");
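
/*
 * VM Create
 *
 * Creates a VM with the mode specified by @mode, opening /dev/kvm with the
 * permissions given by @perm. When @phy_pages is non-zero, a memory region
 * of @phy_pages physical pages backed by anonymous host memory is added at
 * guest physical address 0, slot 0.
 */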
struct kvm_vm *_vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	struct kvm_vm *vm;

	pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
		 vm_guest_mode_string(mode), phy_pages, perm);

	vm = calloc(1, sizeof(*vm));
	TEST_ASSERT(vm != NULL, "Insufficient Memory");

	INIT_LIST_HEAD(&vm->vcpus);
	INIT_LIST_HEAD(&vm->userspace_mem_regions);

	vm->mode = mode;
	vm->type = 0;

	vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
	vm->va_bits = vm_guest_mode_params[mode].va_bits;
	vm->page_size = vm_guest_mode_params[mode].page_size;
	vm->page_shift = vm_guest_mode_params[mode].page_shift;

	switch (vm->mode) {
	case VM_MODE_P52V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P52V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P48V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P48V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_P40V48_4K:
		vm->pgtable_levels = 4;
		break;
	case VM_MODE_P40V48_64K:
		vm->pgtable_levels = 3;
		break;
	case VM_MODE_PXXV48_4K:
#ifdef __x86_64__
		kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
		TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
			    "Linear address width (%d bits) not supported",
			    vm->va_bits);
		pr_debug("Guest physical address width detected: %d\n",
			 vm->pa_bits);
		vm->pgtable_levels = 4;
		vm->va_bits = 48;
#else
		TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
#endif
		break;
	default:
		TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
	}

#ifdef __aarch64__
	if (vm->pa_bits != 40)
		vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
#endif

	vm_open(vm, perm);

	vm->vpages_valid = sparsebit_alloc();
	sparsebit_set_num(vm->vpages_valid,
		0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
	sparsebit_set_num(vm->vpages_valid,
		(~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
		(1ULL << (vm->va_bits - 1)) >> vm->page_shift);

	vm->max_gfn = ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;

	vm->vpages_mapped = sparsebit_alloc();
	if (phy_pages != 0)
		vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
					    0, 0, phy_pages, 0);

	return vm;
}

struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
{
	return _vm_create(mode, phy_pages, perm);
}
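
/*
 * VM Restart
 *
 * Reopens the file descriptors associated with the VM and reinstates the
 * global state, such as the irqchip and the memory regions that were owned
 * by the VM when it was first created.
 */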
void kvm_vm_restart(struct kvm_vm *vmp, int perm)
{
	struct userspace_mem_region *region;

	vm_open(vmp, perm);
	if (vmp->has_irqchip)
		vm_create_irqchip(vmp);

	list_for_each_entry(region, &vmp->userspace_mem_regions, list) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
		TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
			    " rc: %i errno: %i\n"
			    " slot: %u flags: 0x%x\n"
			    " guest_phys_addr: 0x%llx size: 0x%llx",
			    ret, errno, region->region.slot,
			    region->region.flags,
			    region->region.guest_phys_addr,
			    region->region.memory_size);
	}
}

void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
{
	struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
	int ret;

	ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}

void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
			    uint64_t first_page, uint32_t num_pages)
{
	struct kvm_clear_dirty_log args = { .dirty_bitmap = log, .slot = slot,
					    .first_page = first_page,
					    .num_pages = num_pages };
	int ret;

	ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
	TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
		    __func__, strerror(errno));
}
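
/*
 * Userspace Memory Region Find
 *
 * Searches the VM's memory regions for one that intersects the guest
 * physical address range [@start, @end] (both inclusive). Returns a pointer
 * to the region, or NULL if no region intersects the range.
 */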
static struct userspace_mem_region *
userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		uint64_t existing_start = region->region.guest_phys_addr;
		uint64_t existing_end = region->region.guest_phys_addr
			+ region->region.memory_size - 1;
		if (start <= existing_end && end >= existing_start)
			return region;
	}

	return NULL;
}
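
/*
 * KVM Userspace Memory Region Find
 *
 * Same as userspace_mem_region_find(), but returns a pointer to the
 * kvm_userspace_memory_region structure embedded in the region, or NULL.
 */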
struct kvm_userspace_memory_region *
kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
				 uint64_t end)
{
	struct userspace_mem_region *region;

	region = userspace_mem_region_find(vm, start, end);
	if (!region)
		return NULL;

	return &region->region;
}
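
/*
 * VCPU Find
 *
 * Returns a pointer to the VCPU with the given @vcpuid, or NULL if the VM
 * has no VCPU with that id.
 */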
struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	list_for_each_entry(vcpu, &vm->vcpus, list) {
		if (vcpu->id == vcpuid)
			return vcpu;
	}

	return NULL;
}
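
/*
 * VM VCPU Remove
 *
 * Removes a vCPU from the VM: unmaps its run state, closes its file
 * descriptor, removes it from the VM's vCPU list and frees the structure.
 */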
static void vm_vcpu_rm(struct vcpu *vcpu)
{
	int ret;

	ret = munmap(vcpu->state, sizeof(*vcpu->state));
	TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
		    "errno: %i", ret, errno);
	ret = close(vcpu->fd);
	TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
		    "errno: %i", ret, errno);

	list_del(&vcpu->list);
	free(vcpu);
}

void kvm_vm_release(struct kvm_vm *vmp)
{
	struct vcpu *vcpu, *tmp;
	int ret;

	list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
		vm_vcpu_rm(vcpu);

	ret = close(vmp->fd);
	TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
		    " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);

	ret = close(vmp->kvm_fd);
	TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
		    " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
}

static void __vm_mem_region_delete(struct kvm_vm *vm,
				   struct userspace_mem_region *region)
{
	int ret;

	list_del(&region->list);

	region->region.memory_size = 0;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);

	sparsebit_free(&region->unused_phy_pages);
	ret = munmap(region->mmap_start, region->mmap_size);
	TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);

	free(region);
}
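
/*
 * Destroys and frees the VM previously created by vm_create(): deletes all
 * memory regions, frees the page tracking bitmaps, and releases the VM and
 * /dev/kvm file descriptors.
 */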
void kvm_vm_free(struct kvm_vm *vmp)
{
	struct userspace_mem_region *region, *tmp;

	if (vmp == NULL)
		return;

	list_for_each_entry_safe(region, tmp, &vmp->userspace_mem_regions, list)
		__vm_mem_region_delete(vmp, region);

	sparsebit_free(&vmp->vpages_valid);
	sparsebit_free(&vmp->vpages_mapped);

	kvm_vm_release(vmp);

	free(vmp);
}
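
/*
 * Memory Compare, host virtual to guest virtual
 *
 * Compares @len bytes starting at host virtual address @hva with the bytes
 * starting at guest virtual address @gva within @vm. Returns 0 if all bytes
 * are equal, otherwise a non-zero value with the same sign as memcmp() on
 * the first differing chunk. The comparison is done in chunks that never
 * cross a page boundary on either side.
 */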
int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
{
	size_t amt;

	for (uintptr_t offset = 0; offset < len; offset += amt) {
		uintptr_t ptr1 = (uintptr_t)hva + offset;
		uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);

		amt = len - offset;
		if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr1 % vm->page_size);
		if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
			amt = vm->page_size - (ptr2 % vm->page_size);

		assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
		assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));

		int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
		if (ret != 0)
			return ret;
	}

	return 0;
}
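
/*
 * VM Userspace Memory Region Add
 *
 * Allocates @npages of host memory with the backing type given by
 * @src_type, maps it into the guest physical address space at @guest_paddr,
 * and registers it with KVM as memory slot @slot with the given @flags.
 * The guest physical range must be page aligned, must not overlap an
 * existing region, and @slot must not already be in use.
 */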
void vm_userspace_mem_region_add(struct kvm_vm *vm,
	enum vm_mem_backing_src_type src_type,
	uint64_t guest_paddr, uint32_t slot, uint64_t npages,
	uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;
	size_t huge_page_size = KVM_UTIL_PGS_PER_HUGEPG * vm->page_size;
	size_t alignment;

	TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
		"Number of guest pages is not compatible with the host. "
		"Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));

	TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
		"address not on a page boundary.\n"
		" guest_paddr: 0x%lx vm->page_size: 0x%x",
		guest_paddr, vm->page_size);
	TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
		<= vm->max_gfn, "Physical range beyond maximum "
		"supported physical address,\n"
		" guest_paddr: 0x%lx npages: 0x%lx\n"
		" vm->max_gfn: 0x%lx vm->page_size: 0x%x",
		guest_paddr, npages, vm->max_gfn, vm->page_size);

	region = (struct userspace_mem_region *) userspace_mem_region_find(
		vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
	if (region != NULL)
		TEST_FAIL("overlapping userspace_mem_region already "
			"exists\n"
			" requested guest_paddr: 0x%lx npages: 0x%lx "
			"page_size: 0x%x\n"
			" existing guest_paddr: 0x%lx size: 0x%lx",
			guest_paddr, npages, vm->page_size,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot != slot)
			continue;

		TEST_FAIL("A mem region with the requested slot "
			"already exists.\n"
			" requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
			" existing slot: %u paddr: 0x%lx size: 0x%lx",
			slot, guest_paddr, npages,
			region->region.slot,
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size);
	}

	region = calloc(1, sizeof(*region));
	TEST_ASSERT(region != NULL, "Insufficient Memory");
	region->mmap_size = npages * vm->page_size;

#ifdef __s390x__
	alignment = 0x100000;
#else
	alignment = 1;
#endif

	if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
		alignment = max(huge_page_size, alignment);

	if (alignment > 1)
		region->mmap_size += alignment;

	region->mmap_start = mmap(NULL, region->mmap_size,
				  PROT_READ | PROT_WRITE,
				  MAP_PRIVATE | MAP_ANONYMOUS
				  | (src_type == VM_MEM_SRC_ANONYMOUS_HUGETLB ? MAP_HUGETLB : 0),
				  -1, 0);
	TEST_ASSERT(region->mmap_start != MAP_FAILED,
		    "test_malloc failed, mmap_start: %p errno: %i",
		    region->mmap_start, errno);

	region->host_mem = align(region->mmap_start, alignment);

	if (src_type == VM_MEM_SRC_ANONYMOUS || src_type == VM_MEM_SRC_ANONYMOUS_THP) {
		ret = madvise(region->host_mem, npages * vm->page_size,
			      src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
		TEST_ASSERT(ret == 0, "madvise failed,\n"
			    " addr: %p\n"
			    " length: 0x%lx\n"
			    " src_type: %x",
			    region->host_mem, npages * vm->page_size, src_type);
	}

	region->unused_phy_pages = sparsebit_alloc();
	sparsebit_set_num(region->unused_phy_pages,
		guest_paddr >> vm->page_shift, npages);
	region->region.slot = slot;
	region->region.flags = flags;
	region->region.guest_phys_addr = guest_paddr;
	region->region.memory_size = npages * vm->page_size;
	region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		" rc: %i errno: %i\n"
		" slot: %u flags: 0x%x\n"
		" guest_phys_addr: 0x%lx size: 0x%lx",
		ret, errno, slot, flags,
		guest_paddr, (uint64_t) region->region.memory_size);

	list_add(&region->list, &vm->userspace_mem_regions);
}
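
/*
 * Returns a pointer to the memory region that was created for memory slot
 * @memslot. Dumps the VM state and fails the test if no such region exists.
 */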
struct userspace_mem_region *
memslot2region(struct kvm_vm *vm, uint32_t memslot)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if (region->region.slot == memslot)
			return region;
	}

	fprintf(stderr, "No mem region with the requested slot found,\n"
		" requested slot: %u\n", memslot);
	fputs("---- vm dump ----\n", stderr);
	vm_dump(stderr, vm, 2);
	TEST_FAIL("Mem region not found");
	return NULL;
}
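
/*
 * VM Memory Region Flags Set
 *
 * Updates the flags of the memory region identified by @slot and
 * re-registers it with KVM via KVM_SET_USER_MEMORY_REGION.
 */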
void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
{
	int ret;
	struct userspace_mem_region *region;

	region = memslot2region(vm, slot);

	region->region.flags = flags;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
		    " rc: %i errno: %i slot: %u flags: 0x%x",
		    ret, errno, slot, flags);
}
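
/*
 * VM Memory Region Move
 *
 * Changes the guest physical base address of the memory region identified
 * by @slot to @new_gpa and re-registers it with KVM.
 */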
void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
{
	struct userspace_mem_region *region;
	int ret;

	region = memslot2region(vm, slot);

	region->region.guest_phys_addr = new_gpa;

	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);

	TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
		    "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
		    ret, errno, slot, new_gpa);
}
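
/*
 * VM Memory Region Delete
 *
 * Deletes the memory region identified by @slot from both KVM and the VM's
 * bookkeeping, and unmaps the backing host memory.
 */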
void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
{
	__vm_mem_region_delete(vm, memslot2region(vm, slot));
}
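
/*
 * Returns the size of the kvm_run state mapping reported by the
 * KVM_GET_VCPU_MMAP_SIZE ioctl, skipping the test if /dev/kvm cannot be
 * opened.
 */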
static int vcpu_mmap_sz(void)
{
	int dev_fd, ret;

	dev_fd = open(KVM_DEV_PATH, O_RDONLY);
	if (dev_fd < 0)
		exit(KSFT_SKIP);

	ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
	TEST_ASSERT(ret >= sizeof(struct kvm_run),
		    "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
		    __func__, ret, errno);

	close(dev_fd);

	return ret;
}
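
/*
 * VM VCPU Add
 *
 * Creates a vCPU with id @vcpuid in @vm via KVM_CREATE_VCPU, maps its
 * kvm_run state, and adds it to the VM's list of vCPUs. The id must not
 * already be in use.
 */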
void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu;

	vcpu = vcpu_find(vm, vcpuid);
	if (vcpu != NULL)
		TEST_FAIL("vcpu with the specified id "
			  "already exists,\n"
			  " requested vcpuid: %u\n"
			  " existing vcpuid: %u state: %p",
			  vcpuid, vcpu->id, vcpu->state);

	vcpu = calloc(1, sizeof(*vcpu));
	TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
	vcpu->id = vcpuid;
	vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
	TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
		    vcpu->fd, errno);

	TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
		    "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
		    vcpu_mmap_sz(), sizeof(*vcpu->state));
	vcpu->state = (struct kvm_run *) mmap(NULL, sizeof(*vcpu->state),
		PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
	TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
		    "vcpu id: %u errno: %i", vcpuid, errno);

	list_add(&vcpu->list, &vm->vcpus);
}
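
/*
 * VM Virtual Address Unused Gap
 *
 * Finds the lowest virtual address at or above @vaddr_min with at least
 * @sz unused and valid bytes of virtual address space, and returns its
 * page-aligned start. Fails the test if no such gap exists.
 */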
static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
				      vm_vaddr_t vaddr_min)
{
	uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;

	uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
	if ((pgidx_start * vm->page_size) < vaddr_min)
		goto no_va_found;

	if (!sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages))
		pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
			pgidx_start, pages);
	do {
		if (sparsebit_is_clear_num(vm->vpages_mapped,
			pgidx_start, pages))
			goto va_found;
		pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
			pgidx_start, pages);
		if (pgidx_start == 0)
			goto no_va_found;

		if (!sparsebit_is_set_num(vm->vpages_valid,
			pgidx_start, pages)) {
			pgidx_start = sparsebit_next_set_num(
				vm->vpages_valid, pgidx_start, pages);
			if (pgidx_start == 0)
				goto no_va_found;
		}
	} while (pgidx_start != 0);

no_va_found:
	TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);

	return -1;

va_found:
	TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
		pgidx_start, pages),
		"Unexpected, invalid virtual page index range,\n"
		" pgidx_start: 0x%lx\n"
		" pages: 0x%lx",
		pgidx_start, pages);
	TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
		pgidx_start, pages),
		"Unexpected, pages already mapped,\n"
		" pgidx_start: 0x%lx\n"
		" pages: 0x%lx",
		pgidx_start, pages);

	return pgidx_start * vm->page_size;
}
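
/*
 * VM Virtual Address Allocate
 *
 * Allocates at least @sz bytes of virtual address space starting at or
 * above @vaddr_min, backs it with freshly allocated physical pages from
 * @data_memslot, and maps it in the guest page tables (allocated from
 * @pgd_memslot). Returns the starting guest virtual address.
 */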
vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min,
			  uint32_t data_memslot, uint32_t pgd_memslot)
{
	uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);

	virt_pgd_alloc(vm, pgd_memslot);

	vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);

	for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
	     pages--, vaddr += vm->page_size) {
		vm_paddr_t paddr;

		paddr = vm_phy_page_alloc(vm,
			KVM_UTIL_MIN_PFN * vm->page_size, data_memslot);

		virt_pg_map(vm, vaddr, paddr, pgd_memslot);

		sparsebit_set(vm->vpages_mapped,
			vaddr >> vm->page_shift);
	}

	return vaddr_start;
}
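
/*
 * Map a range of VM virtual address to the VM's physical address
 *
 * Maps @npages pages starting at virtual address @vaddr to the physical
 * pages starting at @paddr within @vm. No physical memory is allocated;
 * both ranges must already exist.
 */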
void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
	      unsigned int npages, uint32_t pgd_memslot)
{
	size_t page_size = vm->page_size;
	size_t size = npages * page_size;

	TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
	TEST_ASSERT(paddr + size > paddr, "Paddr overflow");

	while (npages--) {
		virt_pg_map(vm, vaddr, paddr, pgd_memslot);
		vaddr += page_size;
		paddr += page_size;
	}
}
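
/*
 * Address VM Physical to Host Virtual
 *
 * Translates the guest physical address @gpa to the host virtual address of
 * the backing memory. Fails the test if @gpa is not covered by any memory
 * region.
 */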
void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((gpa >= region->region.guest_phys_addr)
			&& (gpa <= (region->region.guest_phys_addr
				+ region->region.memory_size - 1)))
			return (void *) ((uintptr_t) region->host_mem
				+ (gpa - region->region.guest_phys_addr));
	}

	TEST_FAIL("No vm physical memory at 0x%lx", gpa);
	return NULL;
}
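
/*
 * Address Host Virtual to VM Physical
 *
 * Translates the host virtual address @hva back to the guest physical
 * address it backs. Fails the test if @hva is not within any memory region.
 */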
vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
{
	struct userspace_mem_region *region;

	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		if ((hva >= region->host_mem)
			&& (hva <= (region->host_mem
				+ region->region.memory_size - 1)))
			return (vm_paddr_t) ((uintptr_t)
				region->region.guest_phys_addr
				+ (hva - (uintptr_t) region->host_mem));
	}

	TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
	return -1;
}

void vm_create_irqchip(struct kvm_vm *vm)
{
	int ret;

	ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
	TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);

	vm->has_irqchip = true;
}

struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return vcpu->state;
}
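
/*
 * VM VCPU Run
 *
 * Executes the vCPU identified by @vcpuid via KVM_RUN until it exits back
 * to userspace, retrying automatically on EINTR. vcpu_run() asserts
 * success; _vcpu_run() returns the raw ioctl result.
 */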
void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	int ret = _vcpu_run(vm, vcpuid);
	TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);
}

int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int rc;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
	do {
		rc = ioctl(vcpu->fd, KVM_RUN, NULL);
	} while (rc == -1 && errno == EINTR);
	return rc;
}

void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	vcpu->state->immediate_exit = 1;
	ret = ioctl(vcpu->fd, KVM_RUN, NULL);
	vcpu->state->immediate_exit = 0;

	TEST_ASSERT(ret == -1 && errno == EINTR,
		    "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
		    ret, errno);
}

void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_guest_debug *debug)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);

	TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
}
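
/*
 * VM VCPU Set MP State
 *
 * Sets the MP state of the vCPU identified by @vcpuid to the value pointed
 * to by @mp_state and asserts that the KVM_SET_MP_STATE ioctl succeeds.
 */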
void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
		       struct kvm_mp_state *mp_state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
	TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);
}
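
/*
 * VM VCPU Regs Get
 *
 * Retrieves the current register state of the vCPU identified by @vcpuid
 * into @regs via KVM_GET_REGS.
 */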
void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
		    ret, errno);
}

void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
	TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
		    ret, errno);
}

#ifdef __KVM_HAVE_VCPU_EVENTS
void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
		    ret, errno);
}

void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
		     struct kvm_vcpu_events *events)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
	TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
		    ret, errno);
}
#endif

#ifdef __x86_64__
void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
			   struct kvm_nested_state *state)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
	TEST_ASSERT(ret == 0,
		    "KVM_GET_NESTED_STATE failed, ret: %i errno: %i",
		    ret, errno);
}

int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
			  struct kvm_nested_state *state, bool ignore_error)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
	if (!ignore_error) {
		TEST_ASSERT(ret == 0,
			    "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
			    ret, errno);
	}

	return ret;
}
#endif
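
/*
 * VM VCPU System Regs Get
 *
 * Retrieves the special/system registers of the vCPU identified by @vcpuid
 * into @sregs via KVM_GET_SREGS.
 */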
void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
	TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
		    ret, errno);
}
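
/*
 * VM VCPU System Regs Set
 *
 * Sets the special/system registers of the vCPU identified by @vcpuid from
 * @sregs. vcpu_sregs_set() asserts success; _vcpu_sregs_set() returns the
 * raw KVM_SET_SREGS ioctl result.
 */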
void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
	TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);
}

int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
}

void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
	TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
	TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
		    ret, errno, strerror(errno));
}

void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	int ret;

	ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
	TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
		    cmd, ret, errno, strerror(errno));
}

int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
		unsigned long cmd, void *arg)
{
	struct vcpu *vcpu = vcpu_find(vm, vcpuid);
	int ret;

	TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);

	ret = ioctl(vcpu->fd, cmd, arg);

	return ret;
}

void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
{
	int ret;

	ret = ioctl(vm->fd, cmd, arg);
	TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
		    cmd, ret, errno, strerror(errno));
}
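
/*
 * VM Dump
 *
 * Dumps the current state of @vm to @stream: mode, file descriptor, page
 * size, memory regions, mapped virtual pages, page tables (if created) and
 * all vCPUs. @indent gives the number of spaces each line is indented by.
 */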
void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
{
	struct userspace_mem_region *region;
	struct vcpu *vcpu;

	fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
	fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
	fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
	fprintf(stream, "%*sMem Regions:\n", indent, "");
	list_for_each_entry(region, &vm->userspace_mem_regions, list) {
		fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
			"host_virt: %p\n", indent + 2, "",
			(uint64_t) region->region.guest_phys_addr,
			(uint64_t) region->region.memory_size,
			region->host_mem);
		fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
		sparsebit_dump(stream, region->unused_phy_pages, 0);
	}
	fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
	sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
	fprintf(stream, "%*spgd_created: %u\n", indent, "",
		vm->pgd_created);
	if (vm->pgd_created) {
		fprintf(stream, "%*sVirtual Translation Tables:\n",
			indent + 2, "");
		virt_dump(stream, vm, indent + 4);
	}
	fprintf(stream, "%*sVCPUs:\n", indent, "");
	list_for_each_entry(vcpu, &vm->vcpus, list)
		vcpu_dump(stream, vm, vcpu->id, indent + 2);
}

static struct exit_reason {
	unsigned int reason;
	const char *name;
} exit_reasons_known[] = {
	{KVM_EXIT_UNKNOWN, "UNKNOWN"},
	{KVM_EXIT_EXCEPTION, "EXCEPTION"},
	{KVM_EXIT_IO, "IO"},
	{KVM_EXIT_HYPERCALL, "HYPERCALL"},
	{KVM_EXIT_DEBUG, "DEBUG"},
	{KVM_EXIT_HLT, "HLT"},
	{KVM_EXIT_MMIO, "MMIO"},
	{KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
	{KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
	{KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
	{KVM_EXIT_INTR, "INTR"},
	{KVM_EXIT_SET_TPR, "SET_TPR"},
	{KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
	{KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
	{KVM_EXIT_S390_RESET, "S390_RESET"},
	{KVM_EXIT_DCR, "DCR"},
	{KVM_EXIT_NMI, "NMI"},
	{KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
	{KVM_EXIT_OSI, "OSI"},
	{KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
	{KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
#endif
};
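
/*
 * Exit Reason String
 *
 * Returns a constant string representing the given KVM exit reason, or
 * "Unknown" if the exit reason is not in the known list above.
 */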
const char *exit_reason_str(unsigned int exit_reason)
{
	unsigned int n1;

	for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
		if (exit_reason == exit_reasons_known[n1].reason)
			return exit_reasons_known[n1].name;
	}

	return "Unknown";
}
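
/*
 * Physical Contiguous Page Allocator
 *
 * Allocates @num physically contiguous, still-unused pages from memory slot
 * @memslot, starting at or above guest physical address @paddr_min, marks
 * them as used and returns the base guest physical address. Dumps the VM
 * and aborts if the request cannot be satisfied.
 */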
vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
			      vm_paddr_t paddr_min, uint32_t memslot)
{
	struct userspace_mem_region *region;
	sparsebit_idx_t pg, base;

	TEST_ASSERT(num > 0, "Must allocate at least one page");

	TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
		"not divisible by page size.\n"
		" paddr_min: 0x%lx page_size: 0x%x",
		paddr_min, vm->page_size);

	region = memslot2region(vm, memslot);
	base = pg = paddr_min >> vm->page_shift;

	do {
		for (; pg < base + num; ++pg) {
			if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
				base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
				break;
			}
		}
	} while (pg && pg != base + num);

	if (pg == 0) {
		fprintf(stderr, "No guest physical page available, "
			"paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
			paddr_min, vm->page_size, memslot);
		fputs("---- vm dump ----\n", stderr);
		vm_dump(stderr, vm, 2);
		abort();
	}

	for (pg = base; pg < base + num; ++pg)
		sparsebit_clear(region->unused_phy_pages, pg);

	return base * vm->page_size;
}

vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
			     uint32_t memslot)
{
	return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
}
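
/*
 * Address Guest Virtual to Host Virtual
 *
 * Translates the guest virtual address @gva to the host virtual address
 * that backs it, by way of the guest physical address.
 */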
void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
{
	return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
}
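
/*
 * Is Unrestricted Guest
 *
 * Reports whether the kvm_intel "unrestricted_guest" module parameter is
 * enabled on the host. @vm may be NULL; when it is, /dev/kvm is opened
 * first to verify that KVM is available.
 */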
bool vm_is_unrestricted_guest(struct kvm_vm *vm)
{
	char val = 'N';
	size_t count;
	FILE *f;

	if (vm == NULL) {
		f = fopen(KVM_DEV_PATH, "r");
		TEST_ASSERT(f != NULL, "Error in opening KVM dev file: %d",
			    errno);
		fclose(f);
	}

	f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
	if (f) {
		count = fread(&val, sizeof(char), 1, f);
		TEST_ASSERT(count == 1, "Unable to read from param file.");
		fclose(f);
	}

	return val == 'Y';
}

unsigned int vm_get_page_size(struct kvm_vm *vm)
{
	return vm->page_size;
}

unsigned int vm_get_page_shift(struct kvm_vm *vm)
{
	return vm->page_shift;
}

unsigned int vm_get_max_gfn(struct kvm_vm *vm)
{
	return vm->max_gfn;
}

int vm_get_fd(struct kvm_vm *vm)
{
	return vm->fd;
}

static unsigned int vm_calc_num_pages(unsigned int num_pages,
				      unsigned int page_shift,
				      unsigned int new_page_shift,
				      bool ceil)
{
	unsigned int n = 1 << (new_page_shift - page_shift);

	if (page_shift >= new_page_shift)
		return num_pages * (1 << (page_shift - new_page_shift));

	return num_pages / n + !!(ceil && num_pages % n);
}

static inline int getpageshift(void)
{
	return __builtin_ffs(getpagesize()) - 1;
}

unsigned int
vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
{
	return vm_calc_num_pages(num_guest_pages,
				 vm_guest_mode_params[mode].page_shift,
				 getpageshift(), true);
}

unsigned int
vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
{
	return vm_calc_num_pages(num_host_pages, getpageshift(),
				 vm_guest_mode_params[mode].page_shift, false);
}

unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
{
	unsigned int n;
	n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
	return vm_adjust_num_guest_pages(mode, n);
}