// SPDX-License-Identifier: GPL-2.0-only
/*
 * tools/testing/selftests/kvm/lib/kvm_util.c
 *
 * Copyright (C) 2018, Google LLC.
 */
#define _GNU_SOURCE
#include "test_util.h"
#include "kvm_util.h"
#include "kvm_util_internal.h"
#include "processor.h"

#include <assert.h>
#include <sys/mman.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <unistd.h>
#include <linux/kernel.h>

#define KVM_UTIL_MIN_PFN	2
22
23static int vcpu_mmap_sz(void);
24
25int open_path_or_exit(const char *path, int flags)
26{
27 int fd;
28
29 fd = open(path, flags);
30 if (fd < 0) {
31 print_skip("%s not available (errno: %d)", path, errno);
32 exit(KSFT_SKIP);
33 }
34
35 return fd;
36}
37
/*
 * Open KVM_DEV_PATH if available, otherwise exit the entire program.
 *
 * Input Args:
 *   flags - The flags to pass when opening KVM_DEV_PATH.
 *
 * Return:
 *   The opened file descriptor of /dev/kvm.
 */
47static int _open_kvm_dev_path_or_exit(int flags)
48{
49 return open_path_or_exit(KVM_DEV_PATH, flags);
50}
51
52int open_kvm_dev_path_or_exit(void)
53{
54 return _open_kvm_dev_path_or_exit(O_RDONLY);
55}
56
/*
 * Capability
 *
 * Input Args:
 *   cap - Capability
 *
 * Output Args: None
 *
 * Return:
 *   On success, the value corresponding to the capability (KVM_CAP_*)
 *   specified by the value of cap.  On failure a TEST_ASSERT failure
 *   is produced.
 *
 * Looks up and returns the value corresponding to the capability
 * (KVM_CAP_*) given by cap.
 */
73int kvm_check_cap(long cap)
74{
75 int ret;
76 int kvm_fd;
77
78 kvm_fd = open_kvm_dev_path_or_exit();
79 ret = ioctl(kvm_fd, KVM_CHECK_EXTENSION, cap);
80 TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION IOCTL failed,\n"
81 " rc: %i errno: %i", ret, errno);
82
83 close(kvm_fd);
84
85 return ret;
86}
87
104int vm_check_cap(struct kvm_vm *vm, long cap)
105{
106 int ret;
107
108 ret = ioctl(vm->fd, KVM_CHECK_EXTENSION, cap);
109 TEST_ASSERT(ret >= 0, "KVM_CHECK_EXTENSION VM IOCTL failed,\n"
110 " rc: %i errno: %i", ret, errno);
111
112 return ret;
113}
114
127int vm_enable_cap(struct kvm_vm *vm, struct kvm_enable_cap *cap)
128{
129 int ret;
130
131 ret = ioctl(vm->fd, KVM_ENABLE_CAP, cap);
132 TEST_ASSERT(ret == 0, "KVM_ENABLE_CAP IOCTL failed,\n"
133 " rc: %i errno: %i", ret, errno);
134
135 return ret;
136}
137
151int vcpu_enable_cap(struct kvm_vm *vm, uint32_t vcpu_id,
152 struct kvm_enable_cap *cap)
153{
154 struct vcpu *vcpu = vcpu_find(vm, vcpu_id);
155 int r;
156
157 TEST_ASSERT(vcpu, "cannot find vcpu %d", vcpu_id);
158
159 r = ioctl(vcpu->fd, KVM_ENABLE_CAP, cap);
160 TEST_ASSERT(!r, "KVM_ENABLE_CAP vCPU ioctl failed,\n"
161 " rc: %i, errno: %i", r, errno);
162
163 return r;
164}
165
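/*
 * Enable dirty ring logging for the VM and record the ring size (in bytes)
 * so that vcpu_map_dirty_ring() knows how much to mmap() per vCPU.
 */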
166void vm_enable_dirty_ring(struct kvm_vm *vm, uint32_t ring_size)
167{
168 struct kvm_enable_cap cap = { 0 };
169
170 cap.cap = KVM_CAP_DIRTY_LOG_RING;
171 cap.args[0] = ring_size;
172 vm_enable_cap(vm, &cap);
173 vm->dirty_ring_size = ring_size;
174}
175
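/*
 * Open /dev/kvm and create the VM file descriptor.  Skips the test if
 * KVM_CAP_IMMEDIATE_EXIT is not available, since the harness relies on it
 * to complete pending I/O (see vcpu_run_complete_io()).
 */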
176static void vm_open(struct kvm_vm *vm, int perm)
177{
178 vm->kvm_fd = _open_kvm_dev_path_or_exit(perm);
179
180 if (!kvm_check_cap(KVM_CAP_IMMEDIATE_EXIT)) {
181 print_skip("immediate_exit not available");
182 exit(KSFT_SKIP);
183 }
184
185 vm->fd = ioctl(vm->kvm_fd, KVM_CREATE_VM, vm->type);
186 TEST_ASSERT(vm->fd >= 0, "KVM_CREATE_VM ioctl failed, "
187 "rc: %i errno: %i", vm->fd, errno);
188}
189
190const char *vm_guest_mode_string(uint32_t i)
191{
192 static const char * const strings[] = {
193 [VM_MODE_P52V48_4K] = "PA-bits:52, VA-bits:48, 4K pages",
194 [VM_MODE_P52V48_64K] = "PA-bits:52, VA-bits:48, 64K pages",
195 [VM_MODE_P48V48_4K] = "PA-bits:48, VA-bits:48, 4K pages",
196 [VM_MODE_P48V48_16K] = "PA-bits:48, VA-bits:48, 16K pages",
197 [VM_MODE_P48V48_64K] = "PA-bits:48, VA-bits:48, 64K pages",
198 [VM_MODE_P40V48_4K] = "PA-bits:40, VA-bits:48, 4K pages",
199 [VM_MODE_P40V48_16K] = "PA-bits:40, VA-bits:48, 16K pages",
200 [VM_MODE_P40V48_64K] = "PA-bits:40, VA-bits:48, 64K pages",
201 [VM_MODE_PXXV48_4K] = "PA-bits:ANY, VA-bits:48, 4K pages",
202 [VM_MODE_P47V64_4K] = "PA-bits:47, VA-bits:64, 4K pages",
203 [VM_MODE_P44V64_4K] = "PA-bits:44, VA-bits:64, 4K pages",
204 [VM_MODE_P36V48_4K] = "PA-bits:36, VA-bits:48, 4K pages",
205 [VM_MODE_P36V48_16K] = "PA-bits:36, VA-bits:48, 16K pages",
206 [VM_MODE_P36V48_64K] = "PA-bits:36, VA-bits:48, 64K pages",
207 [VM_MODE_P36V47_16K] = "PA-bits:36, VA-bits:47, 16K pages",
208 };
209 _Static_assert(sizeof(strings)/sizeof(char *) == NUM_VM_MODES,
210 "Missing new mode strings?");
211
212 TEST_ASSERT(i < NUM_VM_MODES, "Guest mode ID %d too big", i);
213
214 return strings[i];
215}
216
217const struct vm_guest_mode_params vm_guest_mode_params[] = {
218 [VM_MODE_P52V48_4K] = { 52, 48, 0x1000, 12 },
219 [VM_MODE_P52V48_64K] = { 52, 48, 0x10000, 16 },
220 [VM_MODE_P48V48_4K] = { 48, 48, 0x1000, 12 },
221 [VM_MODE_P48V48_16K] = { 48, 48, 0x4000, 14 },
222 [VM_MODE_P48V48_64K] = { 48, 48, 0x10000, 16 },
223 [VM_MODE_P40V48_4K] = { 40, 48, 0x1000, 12 },
224 [VM_MODE_P40V48_16K] = { 40, 48, 0x4000, 14 },
225 [VM_MODE_P40V48_64K] = { 40, 48, 0x10000, 16 },
226 [VM_MODE_PXXV48_4K] = { 0, 0, 0x1000, 12 },
227 [VM_MODE_P47V64_4K] = { 47, 64, 0x1000, 12 },
228 [VM_MODE_P44V64_4K] = { 44, 64, 0x1000, 12 },
229 [VM_MODE_P36V48_4K] = { 36, 48, 0x1000, 12 },
230 [VM_MODE_P36V48_16K] = { 36, 48, 0x4000, 14 },
231 [VM_MODE_P36V48_64K] = { 36, 48, 0x10000, 16 },
232 [VM_MODE_P36V47_16K] = { 36, 47, 0x4000, 14 },
233};
234_Static_assert(sizeof(vm_guest_mode_params)/sizeof(struct vm_guest_mode_params) == NUM_VM_MODES,
235 "Missing new mode params?");
236
/*
 * VM Create
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   phy_pages - Physical memory pages
 *   perm - permission
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K).
 * When phy_pages is non-zero, a memory region of phy_pages physical pages
 * is created and mapped starting at guest physical address 0.
 */
256struct kvm_vm *vm_create(enum vm_guest_mode mode, uint64_t phy_pages, int perm)
257{
258 struct kvm_vm *vm;
259
260 pr_debug("%s: mode='%s' pages='%ld' perm='%d'\n", __func__,
261 vm_guest_mode_string(mode), phy_pages, perm);
262
263 vm = calloc(1, sizeof(*vm));
264 TEST_ASSERT(vm != NULL, "Insufficient Memory");
265
266 INIT_LIST_HEAD(&vm->vcpus);
267 vm->regions.gpa_tree = RB_ROOT;
268 vm->regions.hva_tree = RB_ROOT;
269 hash_init(vm->regions.slot_hash);
270
271 vm->mode = mode;
272 vm->type = 0;
273
274 vm->pa_bits = vm_guest_mode_params[mode].pa_bits;
275 vm->va_bits = vm_guest_mode_params[mode].va_bits;
276 vm->page_size = vm_guest_mode_params[mode].page_size;
277 vm->page_shift = vm_guest_mode_params[mode].page_shift;
278
279
280 switch (vm->mode) {
281 case VM_MODE_P52V48_4K:
282 vm->pgtable_levels = 4;
283 break;
284 case VM_MODE_P52V48_64K:
285 vm->pgtable_levels = 3;
286 break;
287 case VM_MODE_P48V48_4K:
288 vm->pgtable_levels = 4;
289 break;
290 case VM_MODE_P48V48_64K:
291 vm->pgtable_levels = 3;
292 break;
293 case VM_MODE_P40V48_4K:
294 case VM_MODE_P36V48_4K:
295 vm->pgtable_levels = 4;
296 break;
297 case VM_MODE_P40V48_64K:
298 case VM_MODE_P36V48_64K:
299 vm->pgtable_levels = 3;
300 break;
301 case VM_MODE_P48V48_16K:
302 case VM_MODE_P40V48_16K:
303 case VM_MODE_P36V48_16K:
304 vm->pgtable_levels = 4;
305 break;
306 case VM_MODE_P36V47_16K:
307 vm->pgtable_levels = 3;
308 break;
309 case VM_MODE_PXXV48_4K:
310#ifdef __x86_64__
311 kvm_get_cpu_address_width(&vm->pa_bits, &vm->va_bits);
312
313
314
315
316
317 TEST_ASSERT(vm->va_bits == 48 || vm->va_bits == 57,
318 "Linear address width (%d bits) not supported",
319 vm->va_bits);
320 pr_debug("Guest physical address width detected: %d\n",
321 vm->pa_bits);
322 vm->pgtable_levels = 4;
323 vm->va_bits = 48;
324#else
325 TEST_FAIL("VM_MODE_PXXV48_4K not supported on non-x86 platforms");
326#endif
327 break;
328 case VM_MODE_P47V64_4K:
329 vm->pgtable_levels = 5;
330 break;
331 case VM_MODE_P44V64_4K:
332 vm->pgtable_levels = 5;
333 break;
334 default:
335 TEST_FAIL("Unknown guest mode, mode: 0x%x", mode);
336 }
337
338#ifdef __aarch64__
339 if (vm->pa_bits != 40)
340 vm->type = KVM_VM_TYPE_ARM_IPA_SIZE(vm->pa_bits);
341#endif
342
343 vm_open(vm, perm);
344
345
346 vm->vpages_valid = sparsebit_alloc();
347 sparsebit_set_num(vm->vpages_valid,
348 0, (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
349 sparsebit_set_num(vm->vpages_valid,
350 (~((1ULL << (vm->va_bits - 1)) - 1)) >> vm->page_shift,
351 (1ULL << (vm->va_bits - 1)) >> vm->page_shift);
352
353
354 vm->max_gfn = vm_compute_max_gfn(vm);
355
356
357 vm->vpages_mapped = sparsebit_alloc();
358 if (phy_pages != 0)
359 vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
360 0, 0, phy_pages, 0);
361
362 return vm;
363}
364
/*
 * VM Create with customized parameters
 *
 * Input Args:
 *   mode - VM Mode (e.g. VM_MODE_P52V48_4K)
 *   nr_vcpus - VCPU count
 *   slot0_mem_pages - Slot0 memory size (in pages)
 *   extra_mem_pages - Non-slot0 physical memory total size (in pages)
 *   num_percpu_pages - Per-cpu physical memory pages
 *   guest_code - Guest entry point
 *   vcpuids - VCPU IDs
 *
 * Output Args: None
 *
 * Return:
 *   Pointer to opaque structure that describes the created VM.
 *
 * Creates a VM with the mode specified by mode (e.g. VM_MODE_P52V48_4K),
 * loads the test program into it, creates the irqchip (on x86), and adds
 * nr_vcpus vCPUs, each with a default stack and num_percpu_pages extra
 * pages of backing memory.
 */
387struct kvm_vm *vm_create_with_vcpus(enum vm_guest_mode mode, uint32_t nr_vcpus,
388 uint64_t slot0_mem_pages, uint64_t extra_mem_pages,
389 uint32_t num_percpu_pages, void *guest_code,
390 uint32_t vcpuids[])
391{
392 uint64_t vcpu_pages, extra_pg_pages, pages;
393 struct kvm_vm *vm;
394 int i;
395
396
397 if (slot0_mem_pages < DEFAULT_GUEST_PHY_PAGES)
398 slot0_mem_pages = DEFAULT_GUEST_PHY_PAGES;
399
400
401
402
403
404
405
406 vcpu_pages = (DEFAULT_STACK_PGS + num_percpu_pages) * nr_vcpus;
407 extra_pg_pages = (slot0_mem_pages + extra_mem_pages + vcpu_pages) / PTES_PER_MIN_PAGE * 2;
408 pages = slot0_mem_pages + vcpu_pages + extra_pg_pages;
409
410 TEST_ASSERT(nr_vcpus <= kvm_check_cap(KVM_CAP_MAX_VCPUS),
411 "nr_vcpus = %d too large for host, max-vcpus = %d",
412 nr_vcpus, kvm_check_cap(KVM_CAP_MAX_VCPUS));
413
414 pages = vm_adjust_num_guest_pages(mode, pages);
415 vm = vm_create(mode, pages, O_RDWR);
416
417 kvm_vm_elf_load(vm, program_invocation_name);
418
419#ifdef __x86_64__
420 vm_create_irqchip(vm);
421#endif
422
423 for (i = 0; i < nr_vcpus; ++i) {
424 uint32_t vcpuid = vcpuids ? vcpuids[i] : i;
425
426 vm_vcpu_add_default(vm, vcpuid, guest_code);
427 }
428
429 return vm;
430}
431
432struct kvm_vm *vm_create_default_with_vcpus(uint32_t nr_vcpus, uint64_t extra_mem_pages,
433 uint32_t num_percpu_pages, void *guest_code,
434 uint32_t vcpuids[])
435{
436 return vm_create_with_vcpus(VM_MODE_DEFAULT, nr_vcpus, DEFAULT_GUEST_PHY_PAGES,
437 extra_mem_pages, num_percpu_pages, guest_code, vcpuids);
438}
439
440struct kvm_vm *vm_create_default(uint32_t vcpuid, uint64_t extra_mem_pages,
441 void *guest_code)
442{
443 return vm_create_default_with_vcpus(1, extra_mem_pages, 0, guest_code,
444 (uint32_t []){ vcpuid });
445}
446
/*
 * VM Restart
 *
 * Input Args:
 *   vmp - VM that has been released before
 *   perm - permission
 *
 * Output Args: None
 *
 * Reopens the file descriptors associated to the VM and reinstates the
 * global state, such as the irqchip and the memory regions that are mapped
 * into the guest.
 */
460void kvm_vm_restart(struct kvm_vm *vmp, int perm)
461{
462 int ctr;
463 struct userspace_mem_region *region;
464
465 vm_open(vmp, perm);
466 if (vmp->has_irqchip)
467 vm_create_irqchip(vmp);
468
469 hash_for_each(vmp->regions.slot_hash, ctr, region, slot_node) {
		int ret = ioctl(vmp->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
471 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
472 " rc: %i errno: %i\n"
473 " slot: %u flags: 0x%x\n"
474 " guest_phys_addr: 0x%llx size: 0x%llx",
475 ret, errno, region->region.slot,
476 region->region.flags,
477 region->region.guest_phys_addr,
478 region->region.memory_size);
479 }
480}
481
482void kvm_vm_get_dirty_log(struct kvm_vm *vm, int slot, void *log)
483{
484 struct kvm_dirty_log args = { .dirty_bitmap = log, .slot = slot };
485 int ret;
486
487 ret = ioctl(vm->fd, KVM_GET_DIRTY_LOG, &args);
488 TEST_ASSERT(ret == 0, "%s: KVM_GET_DIRTY_LOG failed: %s",
489 __func__, strerror(-ret));
490}
491
492void kvm_vm_clear_dirty_log(struct kvm_vm *vm, int slot, void *log,
493 uint64_t first_page, uint32_t num_pages)
494{
495 struct kvm_clear_dirty_log args = {
496 .dirty_bitmap = log, .slot = slot,
497 .first_page = first_page,
498 .num_pages = num_pages
499 };
500 int ret;
501
502 ret = ioctl(vm->fd, KVM_CLEAR_DIRTY_LOG, &args);
503 TEST_ASSERT(ret == 0, "%s: KVM_CLEAR_DIRTY_LOG failed: %s",
504 __func__, strerror(-ret));
505}
506
507uint32_t kvm_vm_reset_dirty_ring(struct kvm_vm *vm)
508{
509 return ioctl(vm->fd, KVM_RESET_DIRTY_RINGS);
510}
511
531static struct userspace_mem_region *
532userspace_mem_region_find(struct kvm_vm *vm, uint64_t start, uint64_t end)
533{
534 struct rb_node *node;
535
536 for (node = vm->regions.gpa_tree.rb_node; node; ) {
537 struct userspace_mem_region *region =
538 container_of(node, struct userspace_mem_region, gpa_node);
539 uint64_t existing_start = region->region.guest_phys_addr;
540 uint64_t existing_end = region->region.guest_phys_addr
541 + region->region.memory_size - 1;
542 if (start <= existing_end && end >= existing_start)
543 return region;
544
545 if (start < existing_start)
546 node = node->rb_left;
547 else
548 node = node->rb_right;
549 }
550
551 return NULL;
552}
553
570struct kvm_userspace_memory_region *
571kvm_userspace_memory_region_find(struct kvm_vm *vm, uint64_t start,
572 uint64_t end)
573{
574 struct userspace_mem_region *region;
575
576 region = userspace_mem_region_find(vm, start, end);
577 if (!region)
578 return NULL;
579
	return &region->region;
581}
582
599struct vcpu *vcpu_find(struct kvm_vm *vm, uint32_t vcpuid)
600{
601 struct vcpu *vcpu;
602
603 list_for_each_entry(vcpu, &vm->vcpus, list) {
604 if (vcpu->id == vcpuid)
605 return vcpu;
606 }
607
608 return NULL;
609}
610
623static void vm_vcpu_rm(struct kvm_vm *vm, struct vcpu *vcpu)
624{
625 int ret;
626
627 if (vcpu->dirty_gfns) {
628 ret = munmap(vcpu->dirty_gfns, vm->dirty_ring_size);
629 TEST_ASSERT(ret == 0, "munmap of VCPU dirty ring failed, "
630 "rc: %i errno: %i", ret, errno);
631 vcpu->dirty_gfns = NULL;
632 }
633
634 ret = munmap(vcpu->state, vcpu_mmap_sz());
635 TEST_ASSERT(ret == 0, "munmap of VCPU fd failed, rc: %i "
636 "errno: %i", ret, errno);
637 ret = close(vcpu->fd);
638 TEST_ASSERT(ret == 0, "Close of VCPU fd failed, rc: %i "
639 "errno: %i", ret, errno);
640
641 list_del(&vcpu->list);
642 free(vcpu);
643}
644
645void kvm_vm_release(struct kvm_vm *vmp)
646{
647 struct vcpu *vcpu, *tmp;
648 int ret;
649
650 list_for_each_entry_safe(vcpu, tmp, &vmp->vcpus, list)
651 vm_vcpu_rm(vmp, vcpu);
652
653 ret = close(vmp->fd);
654 TEST_ASSERT(ret == 0, "Close of vm fd failed,\n"
655 " vmp->fd: %i rc: %i errno: %i", vmp->fd, ret, errno);
656
657 ret = close(vmp->kvm_fd);
658 TEST_ASSERT(ret == 0, "Close of /dev/kvm fd failed,\n"
659 " vmp->kvm_fd: %i rc: %i errno: %i", vmp->kvm_fd, ret, errno);
660}
661
662static void __vm_mem_region_delete(struct kvm_vm *vm,
663 struct userspace_mem_region *region,
664 bool unlink)
665{
666 int ret;
667
668 if (unlink) {
		rb_erase(&region->gpa_node, &vm->regions.gpa_tree);
		rb_erase(&region->hva_node, &vm->regions.hva_tree);
		hash_del(&region->slot_node);
	}

	region->region.memory_size = 0;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
	TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed, "
		    "rc: %i errno: %i", ret, errno);

	sparsebit_free(&region->unused_phy_pages);
680 ret = munmap(region->mmap_start, region->mmap_size);
681 TEST_ASSERT(ret == 0, "munmap failed, rc: %i errno: %i", ret, errno);
682
683 free(region);
684}
685
/*
 * Destroys and frees the VM pointed to by vmp.
 */
689void kvm_vm_free(struct kvm_vm *vmp)
690{
691 int ctr;
692 struct hlist_node *node;
693 struct userspace_mem_region *region;
694
695 if (vmp == NULL)
696 return;
697
698
699 hash_for_each_safe(vmp->regions.slot_hash, ctr, node, region, slot_node)
700 __vm_mem_region_delete(vmp, region, false);
701
702
703 sparsebit_free(&vmp->vpages_valid);
704 sparsebit_free(&vmp->vpages_mapped);
705
706 kvm_vm_release(vmp);
707
708
709 free(vmp);
710}
711
735int kvm_memcmp_hva_gva(void *hva, struct kvm_vm *vm, vm_vaddr_t gva, size_t len)
736{
737 size_t amt;
738
739
740
741
742
743 for (uintptr_t offset = 0; offset < len; offset += amt) {
744 uintptr_t ptr1 = (uintptr_t)hva + offset;
745
746
747
748
749
750 uintptr_t ptr2 = (uintptr_t)addr_gva2hva(vm, gva + offset);
751
752
753
754
755
756 amt = len - offset;
757 if ((ptr1 >> vm->page_shift) != ((ptr1 + amt) >> vm->page_shift))
758 amt = vm->page_size - (ptr1 % vm->page_size);
759 if ((ptr2 >> vm->page_shift) != ((ptr2 + amt) >> vm->page_shift))
760 amt = vm->page_size - (ptr2 % vm->page_size);
761
762 assert((ptr1 >> vm->page_shift) == ((ptr1 + amt - 1) >> vm->page_shift));
763 assert((ptr2 >> vm->page_shift) == ((ptr2 + amt - 1) >> vm->page_shift));
764
765
766
767
768
769
770 int ret = memcmp((void *)ptr1, (void *)ptr2, amt);
771 if (ret != 0)
772 return ret;
773 }
774
775
776
777
778
779 return 0;
780}
781
782static void vm_userspace_mem_region_gpa_insert(struct rb_root *gpa_tree,
783 struct userspace_mem_region *region)
784{
785 struct rb_node **cur, *parent;
786
787 for (cur = &gpa_tree->rb_node, parent = NULL; *cur; ) {
788 struct userspace_mem_region *cregion;
789
790 cregion = container_of(*cur, typeof(*cregion), gpa_node);
791 parent = *cur;
792 if (region->region.guest_phys_addr <
793 cregion->region.guest_phys_addr)
794 cur = &(*cur)->rb_left;
795 else {
796 TEST_ASSERT(region->region.guest_phys_addr !=
797 cregion->region.guest_phys_addr,
798 "Duplicate GPA in region tree");
799
800 cur = &(*cur)->rb_right;
801 }
802 }
803
	rb_link_node(&region->gpa_node, parent, cur);
	rb_insert_color(&region->gpa_node, gpa_tree);
806}
807
808static void vm_userspace_mem_region_hva_insert(struct rb_root *hva_tree,
809 struct userspace_mem_region *region)
810{
811 struct rb_node **cur, *parent;
812
813 for (cur = &hva_tree->rb_node, parent = NULL; *cur; ) {
814 struct userspace_mem_region *cregion;
815
816 cregion = container_of(*cur, typeof(*cregion), hva_node);
817 parent = *cur;
818 if (region->host_mem < cregion->host_mem)
819 cur = &(*cur)->rb_left;
820 else {
821 TEST_ASSERT(region->host_mem !=
822 cregion->host_mem,
823 "Duplicate HVA in region tree");
824
825 cur = &(*cur)->rb_right;
826 }
827 }
828
	rb_link_node(&region->hva_node, parent, cur);
	rb_insert_color(&region->hva_node, hva_tree);
831}
832
/*
 * VM Userspace Memory Region Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   src_type - Storage source for this region
 *              (e.g. VM_MEM_SRC_ANONYMOUS)
 *   guest_paddr - Starting guest physical address
 *   slot - KVM region slot
 *   npages - Number of physical pages
 *   flags - KVM memory region flags (e.g. KVM_MEM_LOG_DIRTY_PAGES)
 *
 * Output Args: None
 *
 * Return: None
 *
 * Allocates a memory area of the number of pages specified by npages
 * and maps it to the VM specified by vm, at a starting physical address
 * given by guest_paddr.  The region is created with a KVM region slot
 * given by slot, which must be unique and < KVM_MEM_SLOTS_NUM.  The
 * region is created with the flags given by flags.
 */
855void vm_userspace_mem_region_add(struct kvm_vm *vm,
856 enum vm_mem_backing_src_type src_type,
857 uint64_t guest_paddr, uint32_t slot, uint64_t npages,
858 uint32_t flags)
859{
860 int ret;
861 struct userspace_mem_region *region;
862 size_t backing_src_pagesz = get_backing_src_pagesz(src_type);
863 size_t alignment;
864
865 TEST_ASSERT(vm_adjust_num_guest_pages(vm->mode, npages) == npages,
866 "Number of guest pages is not compatible with the host. "
867 "Try npages=%d", vm_adjust_num_guest_pages(vm->mode, npages));
868
869 TEST_ASSERT((guest_paddr % vm->page_size) == 0, "Guest physical "
870 "address not on a page boundary.\n"
871 " guest_paddr: 0x%lx vm->page_size: 0x%x",
872 guest_paddr, vm->page_size);
873 TEST_ASSERT((((guest_paddr >> vm->page_shift) + npages) - 1)
874 <= vm->max_gfn, "Physical range beyond maximum "
875 "supported physical address,\n"
876 " guest_paddr: 0x%lx npages: 0x%lx\n"
877 " vm->max_gfn: 0x%lx vm->page_size: 0x%x",
878 guest_paddr, npages, vm->max_gfn, vm->page_size);
879
880
881
882
883
884 region = (struct userspace_mem_region *) userspace_mem_region_find(
885 vm, guest_paddr, (guest_paddr + npages * vm->page_size) - 1);
886 if (region != NULL)
887 TEST_FAIL("overlapping userspace_mem_region already "
888 "exists\n"
889 " requested guest_paddr: 0x%lx npages: 0x%lx "
890 "page_size: 0x%x\n"
891 " existing guest_paddr: 0x%lx size: 0x%lx",
892 guest_paddr, npages, vm->page_size,
893 (uint64_t) region->region.guest_phys_addr,
894 (uint64_t) region->region.memory_size);
895
896
897 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
898 slot) {
899 if (region->region.slot != slot)
900 continue;
901
902 TEST_FAIL("A mem region with the requested slot "
903 "already exists.\n"
904 " requested slot: %u paddr: 0x%lx npages: 0x%lx\n"
905 " existing slot: %u paddr: 0x%lx size: 0x%lx",
906 slot, guest_paddr, npages,
907 region->region.slot,
908 (uint64_t) region->region.guest_phys_addr,
909 (uint64_t) region->region.memory_size);
910 }
911
912
913 region = calloc(1, sizeof(*region));
914 TEST_ASSERT(region != NULL, "Insufficient Memory");
915 region->mmap_size = npages * vm->page_size;
916
917#ifdef __s390x__
918
919 alignment = 0x100000;
920#else
921 alignment = 1;
922#endif
923
924
925
926
927
928
929
930 if (src_type == VM_MEM_SRC_ANONYMOUS_THP)
931 alignment = max(backing_src_pagesz, alignment);
932
933 ASSERT_EQ(guest_paddr, align_up(guest_paddr, backing_src_pagesz));
934
935
936 if (alignment > 1)
937 region->mmap_size += alignment;
938
939 region->fd = -1;
940 if (backing_src_is_shared(src_type)) {
941 int memfd_flags = MFD_CLOEXEC;
942
943 if (src_type == VM_MEM_SRC_SHARED_HUGETLB)
944 memfd_flags |= MFD_HUGETLB;
945
946 region->fd = memfd_create("kvm_selftest", memfd_flags);
947 TEST_ASSERT(region->fd != -1,
948 "memfd_create failed, errno: %i", errno);
949
950 ret = ftruncate(region->fd, region->mmap_size);
951 TEST_ASSERT(ret == 0, "ftruncate failed, errno: %i", errno);
952
953 ret = fallocate(region->fd,
954 FALLOC_FL_PUNCH_HOLE | FALLOC_FL_KEEP_SIZE, 0,
955 region->mmap_size);
956 TEST_ASSERT(ret == 0, "fallocate failed, errno: %i", errno);
957 }
958
959 region->mmap_start = mmap(NULL, region->mmap_size,
960 PROT_READ | PROT_WRITE,
961 vm_mem_backing_src_alias(src_type)->flag,
962 region->fd, 0);
963 TEST_ASSERT(region->mmap_start != MAP_FAILED,
964 "test_malloc failed, mmap_start: %p errno: %i",
965 region->mmap_start, errno);
966
967 TEST_ASSERT(!is_backing_src_hugetlb(src_type) ||
968 region->mmap_start == align_ptr_up(region->mmap_start, backing_src_pagesz),
969 "mmap_start %p is not aligned to HugeTLB page size 0x%lx",
970 region->mmap_start, backing_src_pagesz);
971
972
973 region->host_mem = align_ptr_up(region->mmap_start, alignment);
974
975
976 if ((src_type == VM_MEM_SRC_ANONYMOUS ||
977 src_type == VM_MEM_SRC_ANONYMOUS_THP) && thp_configured()) {
978 ret = madvise(region->host_mem, npages * vm->page_size,
979 src_type == VM_MEM_SRC_ANONYMOUS ? MADV_NOHUGEPAGE : MADV_HUGEPAGE);
980 TEST_ASSERT(ret == 0, "madvise failed, addr: %p length: 0x%lx src_type: %s",
981 region->host_mem, npages * vm->page_size,
982 vm_mem_backing_src_alias(src_type)->name);
983 }
984
985 region->unused_phy_pages = sparsebit_alloc();
986 sparsebit_set_num(region->unused_phy_pages,
987 guest_paddr >> vm->page_shift, npages);
988 region->region.slot = slot;
989 region->region.flags = flags;
990 region->region.guest_phys_addr = guest_paddr;
991 region->region.memory_size = npages * vm->page_size;
992 region->region.userspace_addr = (uintptr_t) region->host_mem;
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
994 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
995 " rc: %i errno: %i\n"
996 " slot: %u flags: 0x%x\n"
997 " guest_phys_addr: 0x%lx size: 0x%lx",
998 ret, errno, slot, flags,
999 guest_paddr, (uint64_t) region->region.memory_size);
1000
1001
1002 vm_userspace_mem_region_gpa_insert(&vm->regions.gpa_tree, region);
1003 vm_userspace_mem_region_hva_insert(&vm->regions.hva_tree, region);
	hash_add(vm->regions.slot_hash, &region->slot_node, slot);
1005
1006
1007 if (region->fd >= 0) {
1008 region->mmap_alias = mmap(NULL, region->mmap_size,
1009 PROT_READ | PROT_WRITE,
1010 vm_mem_backing_src_alias(src_type)->flag,
1011 region->fd, 0);
1012 TEST_ASSERT(region->mmap_alias != MAP_FAILED,
1013 "mmap of alias failed, errno: %i", errno);
1014
1015
1016 region->host_alias = align_ptr_up(region->mmap_alias, alignment);
1017 }
1018}
1019
1035struct userspace_mem_region *
1036memslot2region(struct kvm_vm *vm, uint32_t memslot)
1037{
1038 struct userspace_mem_region *region;
1039
1040 hash_for_each_possible(vm->regions.slot_hash, region, slot_node,
1041 memslot)
1042 if (region->region.slot == memslot)
1043 return region;
1044
1045 fprintf(stderr, "No mem region with the requested slot found,\n"
1046 " requested slot: %u\n", memslot);
1047 fputs("---- vm dump ----\n", stderr);
1048 vm_dump(stderr, vm, 2);
1049 TEST_FAIL("Mem region not found");
1050 return NULL;
1051}
1052
1067void vm_mem_region_set_flags(struct kvm_vm *vm, uint32_t slot, uint32_t flags)
1068{
1069 int ret;
1070 struct userspace_mem_region *region;
1071
1072 region = memslot2region(vm, slot);
1073
1074 region->region.flags = flags;
1075
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
1077
1078 TEST_ASSERT(ret == 0, "KVM_SET_USER_MEMORY_REGION IOCTL failed,\n"
1079 " rc: %i errno: %i slot: %u flags: 0x%x",
1080 ret, errno, slot, flags);
1081}
1082
1097void vm_mem_region_move(struct kvm_vm *vm, uint32_t slot, uint64_t new_gpa)
1098{
1099 struct userspace_mem_region *region;
1100 int ret;
1101
1102 region = memslot2region(vm, slot);
1103
1104 region->region.guest_phys_addr = new_gpa;
1105
	ret = ioctl(vm->fd, KVM_SET_USER_MEMORY_REGION, &region->region);
1107
1108 TEST_ASSERT(!ret, "KVM_SET_USER_MEMORY_REGION failed\n"
1109 "ret: %i errno: %i slot: %u new_gpa: 0x%lx",
1110 ret, errno, slot, new_gpa);
1111}
1112
1126void vm_mem_region_delete(struct kvm_vm *vm, uint32_t slot)
1127{
1128 __vm_mem_region_delete(vm, memslot2region(vm, slot), true);
1129}
1130
1144static int vcpu_mmap_sz(void)
1145{
1146 int dev_fd, ret;
1147
1148 dev_fd = open_kvm_dev_path_or_exit();
1149
1150 ret = ioctl(dev_fd, KVM_GET_VCPU_MMAP_SIZE, NULL);
1151 TEST_ASSERT(ret >= sizeof(struct kvm_run),
1152 "%s KVM_GET_VCPU_MMAP_SIZE ioctl failed, rc: %i errno: %i",
1153 __func__, ret, errno);
1154
1155 close(dev_fd);
1156
1157 return ret;
1158}
1159
/*
 * VM VCPU Add
 *
 * Input Args:
 *   vm - Virtual Machine
 *   vcpuid - VCPU ID
 *
 * Output Args: None
 *
 * Return: None
 *
 * Adds a virtual CPU to the VM specified by vm with the ID given by vcpuid.
 * No additional VCPU setup is done.
 */
1174void vm_vcpu_add(struct kvm_vm *vm, uint32_t vcpuid)
1175{
1176 struct vcpu *vcpu;
1177
1178
1179 vcpu = vcpu_find(vm, vcpuid);
1180 if (vcpu != NULL)
1181 TEST_FAIL("vcpu with the specified id "
1182 "already exists,\n"
1183 " requested vcpuid: %u\n"
1184 " existing vcpuid: %u state: %p",
1185 vcpuid, vcpu->id, vcpu->state);
1186
1187
1188 vcpu = calloc(1, sizeof(*vcpu));
1189 TEST_ASSERT(vcpu != NULL, "Insufficient Memory");
1190 vcpu->id = vcpuid;
1191 vcpu->fd = ioctl(vm->fd, KVM_CREATE_VCPU, vcpuid);
1192 TEST_ASSERT(vcpu->fd >= 0, "KVM_CREATE_VCPU failed, rc: %i errno: %i",
1193 vcpu->fd, errno);
1194
1195 TEST_ASSERT(vcpu_mmap_sz() >= sizeof(*vcpu->state), "vcpu mmap size "
1196 "smaller than expected, vcpu_mmap_sz: %i expected_min: %zi",
1197 vcpu_mmap_sz(), sizeof(*vcpu->state));
1198 vcpu->state = (struct kvm_run *) mmap(NULL, vcpu_mmap_sz(),
1199 PROT_READ | PROT_WRITE, MAP_SHARED, vcpu->fd, 0);
1200 TEST_ASSERT(vcpu->state != MAP_FAILED, "mmap vcpu_state failed, "
1201 "vcpu id: %u errno: %i", vcpuid, errno);
1202
1203
1204 list_add(&vcpu->list, &vm->vcpus);
1205}
1206
/*
 * VM Virtual Address Unused Gap
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size (bytes)
 *   vaddr_min - Minimum Virtual Address
 *
 * Output Args: None
 *
 * Return:
 *   Lowest virtual address at or above vaddr_min with at least
 *   sz unused bytes.  TEST_ASSERT failure if no area of at least
 *   sz bytes is available.
 *
 * Within the VM specified by vm, locates an unused virtual address
 * range of at least sz bytes.  The usable range starts at or above
 * vaddr_min and consists only of pages marked valid and not yet mapped.
 */
1227static vm_vaddr_t vm_vaddr_unused_gap(struct kvm_vm *vm, size_t sz,
1228 vm_vaddr_t vaddr_min)
1229{
1230 uint64_t pages = (sz + vm->page_size - 1) >> vm->page_shift;
1231
1232
1233 uint64_t pgidx_start = (vaddr_min + vm->page_size - 1) >> vm->page_shift;
1234 if ((pgidx_start * vm->page_size) < vaddr_min)
1235 goto no_va_found;
1236
1237
1238 if (!sparsebit_is_set_num(vm->vpages_valid,
1239 pgidx_start, pages))
1240 pgidx_start = sparsebit_next_set_num(vm->vpages_valid,
1241 pgidx_start, pages);
1242 do {
1243
1244
1245
1246
1247
1248
1249 if (sparsebit_is_clear_num(vm->vpages_mapped,
1250 pgidx_start, pages))
1251 goto va_found;
1252 pgidx_start = sparsebit_next_clear_num(vm->vpages_mapped,
1253 pgidx_start, pages);
1254 if (pgidx_start == 0)
1255 goto no_va_found;
1256
1257
1258
1259
1260
1261 if (!sparsebit_is_set_num(vm->vpages_valid,
1262 pgidx_start, pages)) {
1263 pgidx_start = sparsebit_next_set_num(
1264 vm->vpages_valid, pgidx_start, pages);
1265 if (pgidx_start == 0)
1266 goto no_va_found;
1267 }
1268 } while (pgidx_start != 0);
1269
1270no_va_found:
1271 TEST_FAIL("No vaddr of specified pages available, pages: 0x%lx", pages);
1272
1273
1274 return -1;
1275
1276va_found:
1277 TEST_ASSERT(sparsebit_is_set_num(vm->vpages_valid,
1278 pgidx_start, pages),
1279 "Unexpected, invalid virtual page index range,\n"
1280 " pgidx_start: 0x%lx\n"
1281 " pages: 0x%lx",
1282 pgidx_start, pages);
1283 TEST_ASSERT(sparsebit_is_clear_num(vm->vpages_mapped,
1284 pgidx_start, pages),
1285 "Unexpected, pages already mapped,\n"
1286 " pgidx_start: 0x%lx\n"
1287 " pages: 0x%lx",
1288 pgidx_start, pages);
1289
1290 return pgidx_start * vm->page_size;
1291}
1292
/*
 * VM Virtual Address Allocate
 *
 * Input Args:
 *   vm - Virtual Machine
 *   sz - Size in bytes
 *   vaddr_min - Minimum starting virtual address
 *
 * Output Args: None
 *
 * Return:
 *   Starting guest virtual address
 *
 * Allocates at least sz bytes within the virtual address space of the
 * VM given by vm.  The allocated bytes are mapped to a virtual address
 * >= the address given by vaddr_min.  Each allocation uses a unique set
 * of pages, with the minimum real allocation being at least one page.
 * The backing physical pages are taken from memslot 0.
 */
1314vm_vaddr_t vm_vaddr_alloc(struct kvm_vm *vm, size_t sz, vm_vaddr_t vaddr_min)
1315{
1316 uint64_t pages = (sz >> vm->page_shift) + ((sz % vm->page_size) != 0);
1317
1318 virt_pgd_alloc(vm);
1319 vm_paddr_t paddr = vm_phy_pages_alloc(vm, pages,
1320 KVM_UTIL_MIN_PFN * vm->page_size, 0);
1321
1322
1323
1324
1325
1326 vm_vaddr_t vaddr_start = vm_vaddr_unused_gap(vm, sz, vaddr_min);
1327
1328
1329 for (vm_vaddr_t vaddr = vaddr_start; pages > 0;
1330 pages--, vaddr += vm->page_size, paddr += vm->page_size) {
1331
1332 virt_pg_map(vm, vaddr, paddr);
1333
1334 sparsebit_set(vm->vpages_mapped,
1335 vaddr >> vm->page_shift);
1336 }
1337
1338 return vaddr_start;
1339}
1340
1355vm_vaddr_t vm_vaddr_alloc_pages(struct kvm_vm *vm, int nr_pages)
1356{
1357 return vm_vaddr_alloc(vm, nr_pages * getpagesize(), KVM_UTIL_MIN_VADDR);
1358}
1359
1374vm_vaddr_t vm_vaddr_alloc_page(struct kvm_vm *vm)
1375{
1376 return vm_vaddr_alloc_pages(vm, 1);
1377}
1378
1396void virt_map(struct kvm_vm *vm, uint64_t vaddr, uint64_t paddr,
1397 unsigned int npages)
1398{
1399 size_t page_size = vm->page_size;
1400 size_t size = npages * page_size;
1401
1402 TEST_ASSERT(vaddr + size > vaddr, "Vaddr overflow");
1403 TEST_ASSERT(paddr + size > paddr, "Paddr overflow");
1404
1405 while (npages--) {
1406 virt_pg_map(vm, vaddr, paddr);
1407 vaddr += page_size;
1408 paddr += page_size;
1409 }
1410}
1411
1429void *addr_gpa2hva(struct kvm_vm *vm, vm_paddr_t gpa)
1430{
1431 struct userspace_mem_region *region;
1432
1433 region = userspace_mem_region_find(vm, gpa, gpa);
1434 if (!region) {
1435 TEST_FAIL("No vm physical memory at 0x%lx", gpa);
1436 return NULL;
1437 }
1438
1439 return (void *)((uintptr_t)region->host_mem
1440 + (gpa - region->region.guest_phys_addr));
1441}
1442
1460vm_paddr_t addr_hva2gpa(struct kvm_vm *vm, void *hva)
1461{
1462 struct rb_node *node;
1463
1464 for (node = vm->regions.hva_tree.rb_node; node; ) {
1465 struct userspace_mem_region *region =
1466 container_of(node, struct userspace_mem_region, hva_node);
1467
1468 if (hva >= region->host_mem) {
1469 if (hva <= (region->host_mem
1470 + region->region.memory_size - 1))
1471 return (vm_paddr_t)((uintptr_t)
1472 region->region.guest_phys_addr
1473 + (hva - (uintptr_t)region->host_mem));
1474
1475 node = node->rb_right;
1476 } else
1477 node = node->rb_left;
1478 }
1479
1480 TEST_FAIL("No mapping to a guest physical address, hva: %p", hva);
1481 return -1;
1482}
1483
1504void *addr_gpa2alias(struct kvm_vm *vm, vm_paddr_t gpa)
1505{
1506 struct userspace_mem_region *region;
1507 uintptr_t offset;
1508
1509 region = userspace_mem_region_find(vm, gpa, gpa);
1510 if (!region)
1511 return NULL;
1512
1513 if (!region->host_alias)
1514 return NULL;
1515
1516 offset = gpa - region->region.guest_phys_addr;
1517 return (void *) ((uintptr_t) region->host_alias + offset);
1518}
1519
1532void vm_create_irqchip(struct kvm_vm *vm)
1533{
1534 int ret;
1535
1536 ret = ioctl(vm->fd, KVM_CREATE_IRQCHIP, 0);
1537 TEST_ASSERT(ret == 0, "KVM_CREATE_IRQCHIP IOCTL failed, "
1538 "rc: %i errno: %i", ret, errno);
1539
1540 vm->has_irqchip = true;
1541}
1542
1558struct kvm_run *vcpu_state(struct kvm_vm *vm, uint32_t vcpuid)
1559{
1560 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1561 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1562
1563 return vcpu->state;
1564}
1565
1580void vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1581{
1582 int ret = _vcpu_run(vm, vcpuid);
1583 TEST_ASSERT(ret == 0, "KVM_RUN IOCTL failed, "
1584 "rc: %i errno: %i", ret, errno);
1585}
1586
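/*
 * Like vcpu_run(), but returns the raw KVM_RUN result to the caller instead
 * of asserting on it.  Retries while KVM_RUN is interrupted by a signal and
 * asserts if the guest hit an unhandled exception.
 */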
1587int _vcpu_run(struct kvm_vm *vm, uint32_t vcpuid)
1588{
1589 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1590 int rc;
1591
1592 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1593 do {
1594 rc = ioctl(vcpu->fd, KVM_RUN, NULL);
1595 } while (rc == -1 && errno == EINTR);
1596
1597 assert_on_unhandled_exception(vm, vcpuid);
1598
1599 return rc;
1600}
1601
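/* Return the file descriptor backing the vCPU with the given ID. */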
1602int vcpu_get_fd(struct kvm_vm *vm, uint32_t vcpuid)
1603{
1604 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1605
1606 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1607
1608 return vcpu->fd;
1609}
1610
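/*
 * Complete any pending exit (e.g. MMIO/PIO emulation) for a vCPU without
 * entering the guest: KVM_RUN is issued with immediate_exit set, which is
 * expected to return -1 with errno == EINTR.
 */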
1611void vcpu_run_complete_io(struct kvm_vm *vm, uint32_t vcpuid)
1612{
1613 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1614 int ret;
1615
1616 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1617
1618 vcpu->state->immediate_exit = 1;
1619 ret = ioctl(vcpu->fd, KVM_RUN, NULL);
1620 vcpu->state->immediate_exit = 0;
1621
1622 TEST_ASSERT(ret == -1 && errno == EINTR,
1623 "KVM_RUN IOCTL didn't exit immediately, rc: %i, errno: %i",
1624 ret, errno);
1625}
1626
1627void vcpu_set_guest_debug(struct kvm_vm *vm, uint32_t vcpuid,
1628 struct kvm_guest_debug *debug)
1629{
1630 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1631 int ret = ioctl(vcpu->fd, KVM_SET_GUEST_DEBUG, debug);
1632
1633 TEST_ASSERT(ret == 0, "KVM_SET_GUEST_DEBUG failed: %d", ret);
1634}
1635
1651void vcpu_set_mp_state(struct kvm_vm *vm, uint32_t vcpuid,
1652 struct kvm_mp_state *mp_state)
1653{
1654 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1655 int ret;
1656
1657 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1658
1659 ret = ioctl(vcpu->fd, KVM_SET_MP_STATE, mp_state);
1660 TEST_ASSERT(ret == 0, "KVM_SET_MP_STATE IOCTL failed, "
1661 "rc: %i errno: %i", ret, errno);
1662}
1663
1680struct kvm_reg_list *vcpu_get_reg_list(struct kvm_vm *vm, uint32_t vcpuid)
1681{
1682 struct kvm_reg_list reg_list_n = { .n = 0 }, *reg_list;
1683 int ret;
1684
	ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, &reg_list_n);
1686 TEST_ASSERT(ret == -1 && errno == E2BIG, "KVM_GET_REG_LIST n=0");
1687 reg_list = calloc(1, sizeof(*reg_list) + reg_list_n.n * sizeof(__u64));
1688 reg_list->n = reg_list_n.n;
1689 vcpu_ioctl(vm, vcpuid, KVM_GET_REG_LIST, reg_list);
1690 return reg_list;
1691}
1692
1708void vcpu_regs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1709{
1710 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1711 int ret;
1712
1713 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1714
1715 ret = ioctl(vcpu->fd, KVM_GET_REGS, regs);
1716 TEST_ASSERT(ret == 0, "KVM_GET_REGS failed, rc: %i errno: %i",
1717 ret, errno);
1718}
1719
1735void vcpu_regs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_regs *regs)
1736{
1737 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1738 int ret;
1739
1740 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1741
1742 ret = ioctl(vcpu->fd, KVM_SET_REGS, regs);
1743 TEST_ASSERT(ret == 0, "KVM_SET_REGS failed, rc: %i errno: %i",
1744 ret, errno);
1745}
1746
1747#ifdef __KVM_HAVE_VCPU_EVENTS
1748void vcpu_events_get(struct kvm_vm *vm, uint32_t vcpuid,
1749 struct kvm_vcpu_events *events)
1750{
1751 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1752 int ret;
1753
1754 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1755
1756 ret = ioctl(vcpu->fd, KVM_GET_VCPU_EVENTS, events);
1757 TEST_ASSERT(ret == 0, "KVM_GET_VCPU_EVENTS, failed, rc: %i errno: %i",
1758 ret, errno);
1759}
1760
1761void vcpu_events_set(struct kvm_vm *vm, uint32_t vcpuid,
1762 struct kvm_vcpu_events *events)
1763{
1764 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1765 int ret;
1766
1767 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1768
1769 ret = ioctl(vcpu->fd, KVM_SET_VCPU_EVENTS, events);
1770 TEST_ASSERT(ret == 0, "KVM_SET_VCPU_EVENTS, failed, rc: %i errno: %i",
1771 ret, errno);
1772}
1773#endif
1774
1775#ifdef __x86_64__
1776void vcpu_nested_state_get(struct kvm_vm *vm, uint32_t vcpuid,
1777 struct kvm_nested_state *state)
1778{
1779 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1780 int ret;
1781
1782 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1783
1784 ret = ioctl(vcpu->fd, KVM_GET_NESTED_STATE, state);
	TEST_ASSERT(ret == 0,
		"KVM_GET_NESTED_STATE failed, ret: %i errno: %i",
		ret, errno);
1788}
1789
1790int vcpu_nested_state_set(struct kvm_vm *vm, uint32_t vcpuid,
1791 struct kvm_nested_state *state, bool ignore_error)
1792{
1793 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1794 int ret;
1795
1796 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1797
1798 ret = ioctl(vcpu->fd, KVM_SET_NESTED_STATE, state);
1799 if (!ignore_error) {
1800 TEST_ASSERT(ret == 0,
1801 "KVM_SET_NESTED_STATE failed, ret: %i errno: %i",
1802 ret, errno);
1803 }
1804
1805 return ret;
1806}
1807#endif
1808
1824void vcpu_sregs_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1825{
1826 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1827 int ret;
1828
1829 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1830
1831 ret = ioctl(vcpu->fd, KVM_GET_SREGS, sregs);
1832 TEST_ASSERT(ret == 0, "KVM_GET_SREGS failed, rc: %i errno: %i",
1833 ret, errno);
1834}
1835
1851void vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1852{
1853 int ret = _vcpu_sregs_set(vm, vcpuid, sregs);
1854 TEST_ASSERT(ret == 0, "KVM_SET_SREGS IOCTL failed, "
1855 "rc: %i errno: %i", ret, errno);
1856}
1857
1858int _vcpu_sregs_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_sregs *sregs)
1859{
1860 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1861
1862 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1863
1864 return ioctl(vcpu->fd, KVM_SET_SREGS, sregs);
1865}
1866
1867void vcpu_fpu_get(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
1868{
1869 int ret;
1870
1871 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_FPU, fpu);
1872 TEST_ASSERT(ret == 0, "KVM_GET_FPU failed, rc: %i errno: %i (%s)",
1873 ret, errno, strerror(errno));
1874}
1875
1876void vcpu_fpu_set(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_fpu *fpu)
1877{
1878 int ret;
1879
1880 ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_FPU, fpu);
1881 TEST_ASSERT(ret == 0, "KVM_SET_FPU failed, rc: %i errno: %i (%s)",
1882 ret, errno, strerror(errno));
1883}
1884
1885void vcpu_get_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
1886{
1887 int ret;
1888
1889 ret = _vcpu_ioctl(vm, vcpuid, KVM_GET_ONE_REG, reg);
1890 TEST_ASSERT(ret == 0, "KVM_GET_ONE_REG failed, rc: %i errno: %i (%s)",
1891 ret, errno, strerror(errno));
1892}
1893
1894void vcpu_set_reg(struct kvm_vm *vm, uint32_t vcpuid, struct kvm_one_reg *reg)
1895{
1896 int ret;
1897
1898 ret = _vcpu_ioctl(vm, vcpuid, KVM_SET_ONE_REG, reg);
1899 TEST_ASSERT(ret == 0, "KVM_SET_ONE_REG failed, rc: %i errno: %i (%s)",
1900 ret, errno, strerror(errno));
1901}
1902
1916void vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
1917 unsigned long cmd, void *arg)
1918{
1919 int ret;
1920
1921 ret = _vcpu_ioctl(vm, vcpuid, cmd, arg);
1922 TEST_ASSERT(ret == 0, "vcpu ioctl %lu failed, rc: %i errno: %i (%s)",
1923 cmd, ret, errno, strerror(errno));
1924}
1925
1926int _vcpu_ioctl(struct kvm_vm *vm, uint32_t vcpuid,
1927 unsigned long cmd, void *arg)
1928{
1929 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
1930 int ret;
1931
1932 TEST_ASSERT(vcpu != NULL, "vcpu not found, vcpuid: %u", vcpuid);
1933
1934 ret = ioctl(vcpu->fd, cmd, arg);
1935
1936 return ret;
1937}
1938
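/*
 * Map the vCPU's dirty ring into the test process.  The first two mmap()
 * attempts are expected to fail (the ring must not be mappable as a private
 * or executable mapping); the ring is then mapped shared read-write.
 */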
1939void *vcpu_map_dirty_ring(struct kvm_vm *vm, uint32_t vcpuid)
1940{
1941 struct vcpu *vcpu;
1942 uint32_t size = vm->dirty_ring_size;
1943
1944 TEST_ASSERT(size > 0, "Should enable dirty ring first");
1945
1946 vcpu = vcpu_find(vm, vcpuid);
1947
1948 TEST_ASSERT(vcpu, "Cannot find vcpu %u", vcpuid);
1949
1950 if (!vcpu->dirty_gfns) {
1951 void *addr;
1952
1953 addr = mmap(NULL, size, PROT_READ,
1954 MAP_PRIVATE, vcpu->fd,
1955 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1956 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped private");
1957
1958 addr = mmap(NULL, size, PROT_READ | PROT_EXEC,
1959 MAP_PRIVATE, vcpu->fd,
1960 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1961 TEST_ASSERT(addr == MAP_FAILED, "Dirty ring mapped exec");
1962
1963 addr = mmap(NULL, size, PROT_READ | PROT_WRITE,
1964 MAP_SHARED, vcpu->fd,
1965 vm->page_size * KVM_DIRTY_LOG_PAGE_OFFSET);
1966 TEST_ASSERT(addr != MAP_FAILED, "Dirty ring map failed");
1967
1968 vcpu->dirty_gfns = addr;
1969 vcpu->dirty_gfns_count = size / sizeof(struct kvm_dirty_gfn);
1970 }
1971
1972 return vcpu->dirty_gfns;
1973}
1974
1987void vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
1988{
1989 int ret;
1990
1991 ret = _vm_ioctl(vm, cmd, arg);
1992 TEST_ASSERT(ret == 0, "vm ioctl %lu failed, rc: %i errno: %i (%s)",
1993 cmd, ret, errno, strerror(errno));
1994}
1995
1996int _vm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
1997{
1998 return ioctl(vm->fd, cmd, arg);
1999}
2000
2013void kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
2014{
2015 int ret;
2016
2017 ret = ioctl(vm->kvm_fd, cmd, arg);
2018 TEST_ASSERT(ret == 0, "KVM ioctl %lu failed, rc: %i errno: %i (%s)",
2019 cmd, ret, errno, strerror(errno));
2020}
2021
2022int _kvm_ioctl(struct kvm_vm *vm, unsigned long cmd, void *arg)
2023{
2024 return ioctl(vm->kvm_fd, cmd, arg);
2025}
2026
2031int _kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2032{
2033 struct kvm_device_attr attribute = {
2034 .group = group,
2035 .attr = attr,
2036 .flags = 0,
2037 };
2038
2039 return ioctl(dev_fd, KVM_HAS_DEVICE_ATTR, &attribute);
2040}
2041
2042int kvm_device_check_attr(int dev_fd, uint32_t group, uint64_t attr)
2043{
2044 int ret = _kvm_device_check_attr(dev_fd, group, attr);
2045
2046 TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR failed, rc: %i errno: %i", ret, errno);
2047 return ret;
2048}
2049
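/*
 * Issue KVM_CREATE_DEVICE for the given device type.  When 'test' is set,
 * KVM_CREATE_DEVICE_TEST is used, so only the availability of the device
 * type is checked and no device is actually created.
 */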
2050int _kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test, int *fd)
2051{
2052 struct kvm_create_device create_dev;
2053 int ret;
2054
2055 create_dev.type = type;
2056 create_dev.fd = -1;
2057 create_dev.flags = test ? KVM_CREATE_DEVICE_TEST : 0;
2058 ret = ioctl(vm_get_fd(vm), KVM_CREATE_DEVICE, &create_dev);
2059 *fd = create_dev.fd;
2060 return ret;
2061}
2062
2063int kvm_create_device(struct kvm_vm *vm, uint64_t type, bool test)
2064{
2065 int fd, ret;
2066
2067 ret = _kvm_create_device(vm, type, test, &fd);
2068
2069 if (!test) {
2070 TEST_ASSERT(!ret,
2071 "KVM_CREATE_DEVICE IOCTL failed, rc: %i errno: %i", ret, errno);
2072 return fd;
2073 }
2074 return ret;
2075}
2076
2077int _kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
2078 void *val, bool write)
2079{
2080 struct kvm_device_attr kvmattr = {
2081 .group = group,
2082 .attr = attr,
2083 .flags = 0,
2084 .addr = (uintptr_t)val,
2085 };
2086 int ret;
2087
2088 ret = ioctl(dev_fd, write ? KVM_SET_DEVICE_ATTR : KVM_GET_DEVICE_ATTR,
2089 &kvmattr);
2090 return ret;
2091}
2092
2093int kvm_device_access(int dev_fd, uint32_t group, uint64_t attr,
2094 void *val, bool write)
2095{
2096 int ret = _kvm_device_access(dev_fd, group, attr, val, write);
2097
2098 TEST_ASSERT(!ret, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
2099 return ret;
2100}
2101
2102int _vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
2103 uint64_t attr)
2104{
2105 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
2106
2107 TEST_ASSERT(vcpu, "nonexistent vcpu id: %d", vcpuid);
2108
2109 return _kvm_device_check_attr(vcpu->fd, group, attr);
2110}
2111
2112int vcpu_has_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
2113 uint64_t attr)
2114{
2115 int ret = _vcpu_has_device_attr(vm, vcpuid, group, attr);
2116
2117 TEST_ASSERT(!ret, "KVM_HAS_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
2118 return ret;
2119}
2120
2121int _vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
2122 uint64_t attr, void *val, bool write)
2123{
2124 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
2125
2126 TEST_ASSERT(vcpu, "nonexistent vcpu id: %d", vcpuid);
2127
2128 return _kvm_device_access(vcpu->fd, group, attr, val, write);
2129}
2130
2131int vcpu_access_device_attr(struct kvm_vm *vm, uint32_t vcpuid, uint32_t group,
2132 uint64_t attr, void *val, bool write)
2133{
2134 int ret = _vcpu_access_device_attr(vm, vcpuid, group, attr, val, write);
2135
2136 TEST_ASSERT(!ret, "KVM_SET|GET_DEVICE_ATTR IOCTL failed, rc: %i errno: %i", ret, errno);
2137 return ret;
2138}
2139
/*
 * IRQ related functions.
 */
2144int _kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
2145{
2146 struct kvm_irq_level irq_level = {
2147 .irq = irq,
2148 .level = level,
2149 };
2150
2151 return _vm_ioctl(vm, KVM_IRQ_LINE, &irq_level);
2152}
2153
2154void kvm_irq_line(struct kvm_vm *vm, uint32_t irq, int level)
2155{
2156 int ret = _kvm_irq_line(vm, irq, level);
2157
2158 TEST_ASSERT(ret >= 0, "KVM_IRQ_LINE failed, rc: %i errno: %i", ret, errno);
2159}
2160
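/*
 * Allocate an empty kvm_irq_routing table with room for KVM_MAX_IRQ_ROUTES
 * entries; it is freed by _kvm_gsi_routing_write() once it has been written.
 */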
2161struct kvm_irq_routing *kvm_gsi_routing_create(void)
2162{
2163 struct kvm_irq_routing *routing;
2164 size_t size;
2165
2166 size = sizeof(struct kvm_irq_routing);
2167
2168 size += KVM_MAX_IRQ_ROUTES * sizeof(struct kvm_irq_routing_entry);
2169 routing = calloc(1, size);
2170 assert(routing);
2171
2172 return routing;
2173}
2174
2175void kvm_gsi_routing_irqchip_add(struct kvm_irq_routing *routing,
2176 uint32_t gsi, uint32_t pin)
2177{
2178 int i;
2179
2180 assert(routing);
2181 assert(routing->nr < KVM_MAX_IRQ_ROUTES);
2182
2183 i = routing->nr;
2184 routing->entries[i].gsi = gsi;
2185 routing->entries[i].type = KVM_IRQ_ROUTING_IRQCHIP;
2186 routing->entries[i].flags = 0;
2187 routing->entries[i].u.irqchip.irqchip = 0;
2188 routing->entries[i].u.irqchip.pin = pin;
2189 routing->nr++;
2190}
2191
2192int _kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
2193{
2194 int ret;
2195
2196 assert(routing);
2197 ret = ioctl(vm_get_fd(vm), KVM_SET_GSI_ROUTING, routing);
2198 free(routing);
2199
2200 return ret;
2201}
2202
2203void kvm_gsi_routing_write(struct kvm_vm *vm, struct kvm_irq_routing *routing)
2204{
2205 int ret;
2206
2207 ret = _kvm_gsi_routing_write(vm, routing);
2208 TEST_ASSERT(ret == 0, "KVM_SET_GSI_ROUTING failed, rc: %i errno: %i",
2209 ret, errno);
2210}
2211
2227void vm_dump(FILE *stream, struct kvm_vm *vm, uint8_t indent)
2228{
2229 int ctr;
2230 struct userspace_mem_region *region;
2231 struct vcpu *vcpu;
2232
2233 fprintf(stream, "%*smode: 0x%x\n", indent, "", vm->mode);
2234 fprintf(stream, "%*sfd: %i\n", indent, "", vm->fd);
2235 fprintf(stream, "%*spage_size: 0x%x\n", indent, "", vm->page_size);
2236 fprintf(stream, "%*sMem Regions:\n", indent, "");
2237 hash_for_each(vm->regions.slot_hash, ctr, region, slot_node) {
2238 fprintf(stream, "%*sguest_phys: 0x%lx size: 0x%lx "
2239 "host_virt: %p\n", indent + 2, "",
2240 (uint64_t) region->region.guest_phys_addr,
2241 (uint64_t) region->region.memory_size,
2242 region->host_mem);
2243 fprintf(stream, "%*sunused_phy_pages: ", indent + 2, "");
2244 sparsebit_dump(stream, region->unused_phy_pages, 0);
2245 }
2246 fprintf(stream, "%*sMapped Virtual Pages:\n", indent, "");
2247 sparsebit_dump(stream, vm->vpages_mapped, indent + 2);
2248 fprintf(stream, "%*spgd_created: %u\n", indent, "",
2249 vm->pgd_created);
2250 if (vm->pgd_created) {
2251 fprintf(stream, "%*sVirtual Translation Tables:\n",
2252 indent + 2, "");
2253 virt_dump(stream, vm, indent + 4);
2254 }
2255 fprintf(stream, "%*sVCPUs:\n", indent, "");
2256 list_for_each_entry(vcpu, &vm->vcpus, list)
2257 vcpu_dump(stream, vm, vcpu->id, indent + 2);
2258}
2259
2260
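/* Known KVM exit reasons */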
2261static struct exit_reason {
2262 unsigned int reason;
2263 const char *name;
2264} exit_reasons_known[] = {
2265 {KVM_EXIT_UNKNOWN, "UNKNOWN"},
2266 {KVM_EXIT_EXCEPTION, "EXCEPTION"},
2267 {KVM_EXIT_IO, "IO"},
2268 {KVM_EXIT_HYPERCALL, "HYPERCALL"},
2269 {KVM_EXIT_DEBUG, "DEBUG"},
2270 {KVM_EXIT_HLT, "HLT"},
2271 {KVM_EXIT_MMIO, "MMIO"},
2272 {KVM_EXIT_IRQ_WINDOW_OPEN, "IRQ_WINDOW_OPEN"},
2273 {KVM_EXIT_SHUTDOWN, "SHUTDOWN"},
2274 {KVM_EXIT_FAIL_ENTRY, "FAIL_ENTRY"},
2275 {KVM_EXIT_INTR, "INTR"},
2276 {KVM_EXIT_SET_TPR, "SET_TPR"},
2277 {KVM_EXIT_TPR_ACCESS, "TPR_ACCESS"},
2278 {KVM_EXIT_S390_SIEIC, "S390_SIEIC"},
2279 {KVM_EXIT_S390_RESET, "S390_RESET"},
2280 {KVM_EXIT_DCR, "DCR"},
2281 {KVM_EXIT_NMI, "NMI"},
2282 {KVM_EXIT_INTERNAL_ERROR, "INTERNAL_ERROR"},
2283 {KVM_EXIT_OSI, "OSI"},
2284 {KVM_EXIT_PAPR_HCALL, "PAPR_HCALL"},
2285 {KVM_EXIT_DIRTY_RING_FULL, "DIRTY_RING_FULL"},
2286 {KVM_EXIT_X86_RDMSR, "RDMSR"},
2287 {KVM_EXIT_X86_WRMSR, "WRMSR"},
2288 {KVM_EXIT_XEN, "XEN"},
2289#ifdef KVM_EXIT_MEMORY_NOT_PRESENT
2290 {KVM_EXIT_MEMORY_NOT_PRESENT, "MEMORY_NOT_PRESENT"},
2291#endif
2292};
2293
2309const char *exit_reason_str(unsigned int exit_reason)
2310{
2311 unsigned int n1;
2312
2313 for (n1 = 0; n1 < ARRAY_SIZE(exit_reasons_known); n1++) {
2314 if (exit_reason == exit_reasons_known[n1].reason)
2315 return exit_reasons_known[n1].name;
2316 }
2317
2318 return "Unknown";
2319}
2320
/*
 * Physical Contiguous Page Allocator
 *
 * Input Args:
 *   vm - Virtual Machine
 *   num - number of pages
 *   paddr_min - Physical address minimum
 *   memslot - Memory region to allocate page from
 *
 * Output Args: None
 *
 * Return:
 *   Starting physical address
 *
 * Within the VM specified by vm, locates a range of available physical
 * pages at or above paddr_min.  If found, the pages are marked as in use
 * and their base address is returned.  The allocation aborts with a VM
 * dump if no area of at least num contiguous physical pages is available.
 */
2340vm_paddr_t vm_phy_pages_alloc(struct kvm_vm *vm, size_t num,
2341 vm_paddr_t paddr_min, uint32_t memslot)
2342{
2343 struct userspace_mem_region *region;
2344 sparsebit_idx_t pg, base;
2345
2346 TEST_ASSERT(num > 0, "Must allocate at least one page");
2347
2348 TEST_ASSERT((paddr_min % vm->page_size) == 0, "Min physical address "
2349 "not divisible by page size.\n"
2350 " paddr_min: 0x%lx page_size: 0x%x",
2351 paddr_min, vm->page_size);
2352
2353 region = memslot2region(vm, memslot);
2354 base = pg = paddr_min >> vm->page_shift;
2355
2356 do {
2357 for (; pg < base + num; ++pg) {
2358 if (!sparsebit_is_set(region->unused_phy_pages, pg)) {
2359 base = pg = sparsebit_next_set(region->unused_phy_pages, pg);
2360 break;
2361 }
2362 }
2363 } while (pg && pg != base + num);
2364
2365 if (pg == 0) {
2366 fprintf(stderr, "No guest physical page available, "
2367 "paddr_min: 0x%lx page_size: 0x%x memslot: %u\n",
2368 paddr_min, vm->page_size, memslot);
2369 fputs("---- vm dump ----\n", stderr);
2370 vm_dump(stderr, vm, 2);
2371 abort();
2372 }
2373
2374 for (pg = base; pg < base + num; ++pg)
2375 sparsebit_clear(region->unused_phy_pages, pg);
2376
2377 return base * vm->page_size;
2378}
2379
2380vm_paddr_t vm_phy_page_alloc(struct kvm_vm *vm, vm_paddr_t paddr_min,
2381 uint32_t memslot)
2382{
2383 return vm_phy_pages_alloc(vm, 1, paddr_min, memslot);
2384}
2385
2386
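/* Arbitrary minimum physical address used for virtual translation tables. */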
2387#define KVM_GUEST_PAGE_TABLE_MIN_PADDR 0x180000
2388
2389vm_paddr_t vm_alloc_page_table(struct kvm_vm *vm)
2390{
2391 return vm_phy_page_alloc(vm, KVM_GUEST_PAGE_TABLE_MIN_PADDR, 0);
2392}
2393
2406void *addr_gva2hva(struct kvm_vm *vm, vm_vaddr_t gva)
2407{
2408 return addr_gpa2hva(vm, addr_gva2gpa(vm, gva));
2409}
2410
/*
 * Is Unrestricted Guest
 *
 * Input Args:
 *   vm - Virtual Machine
 *
 * Output Args: None
 *
 * Return: True if the unrestricted guest parameter is set to 'Y'.
 *
 * Check if the unrestricted guest flag is enabled.
 */
2423bool vm_is_unrestricted_guest(struct kvm_vm *vm)
2424{
2425 char val = 'N';
2426 size_t count;
2427 FILE *f;
2428
2429 if (vm == NULL) {
2430
2431 close(open_kvm_dev_path_or_exit());
2432 }
2433
2434 f = fopen("/sys/module/kvm_intel/parameters/unrestricted_guest", "r");
2435 if (f) {
2436 count = fread(&val, sizeof(char), 1, f);
2437 TEST_ASSERT(count == 1, "Unable to read from param file.");
2438 fclose(f);
2439 }
2440
2441 return val == 'Y';
2442}
2443
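/* Simple accessors for per-VM parameters used throughout the selftests. */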
2444unsigned int vm_get_page_size(struct kvm_vm *vm)
2445{
2446 return vm->page_size;
2447}
2448
2449unsigned int vm_get_page_shift(struct kvm_vm *vm)
2450{
2451 return vm->page_shift;
2452}
2453
2454unsigned long __attribute__((weak)) vm_compute_max_gfn(struct kvm_vm *vm)
2455{
2456 return ((1ULL << vm->pa_bits) >> vm->page_shift) - 1;
2457}
2458
2459uint64_t vm_get_max_gfn(struct kvm_vm *vm)
2460{
2461 return vm->max_gfn;
2462}
2463
2464int vm_get_fd(struct kvm_vm *vm)
2465{
2466 return vm->fd;
2467}
2468
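/*
 * Convert a page count from one page size to another: scale up when the
 * source pages are larger than the destination pages, otherwise divide,
 * optionally rounding up.
 */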
2469static unsigned int vm_calc_num_pages(unsigned int num_pages,
2470 unsigned int page_shift,
2471 unsigned int new_page_shift,
2472 bool ceil)
2473{
2474 unsigned int n = 1 << (new_page_shift - page_shift);
2475
2476 if (page_shift >= new_page_shift)
2477 return num_pages * (1 << (page_shift - new_page_shift));
2478
2479 return num_pages / n + !!(ceil && num_pages % n);
2480}
2481
2482static inline int getpageshift(void)
2483{
2484 return __builtin_ffs(getpagesize()) - 1;
2485}
2486
2487unsigned int
2488vm_num_host_pages(enum vm_guest_mode mode, unsigned int num_guest_pages)
2489{
2490 return vm_calc_num_pages(num_guest_pages,
2491 vm_guest_mode_params[mode].page_shift,
2492 getpageshift(), true);
2493}
2494
2495unsigned int
2496vm_num_guest_pages(enum vm_guest_mode mode, unsigned int num_host_pages)
2497{
2498 return vm_calc_num_pages(num_host_pages, getpageshift(),
2499 vm_guest_mode_params[mode].page_shift, false);
2500}
2501
2502unsigned int vm_calc_num_guest_pages(enum vm_guest_mode mode, size_t size)
2503{
2504 unsigned int n;
2505 n = DIV_ROUND_UP(size, vm_guest_mode_params[mode].page_size);
2506 return vm_adjust_num_guest_pages(mode, n);
2507}
2508
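/*
 * Return a file descriptor for reading the VM's (or a vCPU's) binary stats,
 * as exposed by the KVM_GET_STATS_FD ioctl.
 */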
2509int vm_get_stats_fd(struct kvm_vm *vm)
2510{
2511 return ioctl(vm->fd, KVM_GET_STATS_FD, NULL);
2512}
2513
2514int vcpu_get_stats_fd(struct kvm_vm *vm, uint32_t vcpuid)
2515{
2516 struct vcpu *vcpu = vcpu_find(vm, vcpuid);
2517
2518 return ioctl(vcpu->fd, KVM_GET_STATS_FD, NULL);
2519}
2520