// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

#define VCPU_ID				1

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX		1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM		0xc0000000

/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP		1024

/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N		32UL

/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL		10UL

/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE	((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
	test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
	set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
	clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
	test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
	test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le		test_bit
# define set_bit_le		set_bit
# define clear_bit_le		clear_bit
# define test_and_set_bit_le	test_and_set_bit
# define test_and_clear_bit_le	test_and_clear_bit
#endif

#define TEST_DIRTY_RING_COUNT		65536

#define SIG_IPI SIGUSR1
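/*
 * Guest/Host shared variables.  Ensure addr_gva2hva() and/or
 * sync_global_to/from_guest() are used when accessing from
 * the host.  READ/WRITE_ONCE() should also be used with anything
 * that may change.
 */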
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;
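/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */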
static uint64_t guest_test_phys_mem;
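/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */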
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
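/*
 * Continuously write to the first 8 bytes of a random page within
 * the testing memory region.
 */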
static void guest_code(void)
{
	uint64_t addr;
	int i;
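	/*
	 * On s390x, all pages of a 1M segment are initially marked as dirty
	 * when a page of the segment is written to for the very first time.
	 * To compensate for this specialty in this test, we need to touch
	 * all pages during the first iteration.
	 */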
	for (i = 0; i < guest_num_pages; i++) {
		addr = guest_test_virt_mem + i * guest_page_size;
		*(uint64_t *)addr = READ_ONCE(iteration);
	}

	while (true) {
		for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
			addr = guest_test_virt_mem;
			addr += (READ_ONCE(random_array[i]) % guest_num_pages)
				* guest_page_size;
			addr &= ~(host_page_size - 1);
			*(uint64_t *)addr = READ_ONCE(iteration);
		}

		/* Tell the host that we need more random numbers */
		GUEST_SYNC(1);
	}
}

/* Host variables */
static bool host_quit;

/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;

/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;

/* Semaphores used to pause and resume the vcpu thread */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
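/*
 * Set by the main thread, cleared by the vcpu thread.  When set, it
 * requests the vcpu thread to stop at the next GUEST_SYNC (or ring-full
 * event) and wait on sem_vcpu_cont, so that the main thread can verify
 * the dirty data while no vcpu is running.
 */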
static atomic_t vcpu_sync_stop_requested;
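/*
 * This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event.  It should only be read after a sem_wait() of
 * sem_vcpu_stop and before the vcpu continues to run.
 */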
static bool dirty_ring_vcpu_ring_full;
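/*
 * This is only used for verifying the dirty pages.  Dirty ring has a
 * very tricky case when the ring just got full: kvm will do a userspace
 * exit due to ring full.  When that happens, the very last GFN is set
 * in the ring but the data is not actually changed (the guest write is
 * not really applied yet), because we found that the ring was full,
 * refused to continue the vcpu, and recorded the dirty gfn with the old
 * contents.
 *
 * It's safe to skip the data check for that last collected page,
 * because when the write happens later the bit will be set again.  We
 * keep track of the latest dirty gfn we've collected so that if a data
 * mismatch is found on that page during verification, we let it pass.
 */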
static uint64_t dirty_ring_last_page;

enum log_mode_t {
	/* Only use KVM_GET_DIRTY_LOG for logging */
	LOG_MODE_DIRTY_LOG = 0,

	/* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
	LOG_MODE_CLEAR_LOG = 1,

	/* Use dirty ring for logging */
	LOG_MODE_DIRTY_RING = 2,

	LOG_MODE_NUM,

	/* Run all supported log modes */
	LOG_MODE_ALL = LOG_MODE_NUM,
};

/* Mode selected before running the test */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for the current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

static void vcpu_kick(void)
{
	pthread_kill(vcpu_thread, SIG_IPI);
}

/*
 * In our test we do signal tricks, so use a version of sem_wait that
 * is not interrupted by signals.
 */
static void sem_wait_until(sem_t *sem)
{
	int ret;

	do
		ret = sem_wait(sem);
	while (ret == -1 && errno == EINTR);
}

static bool clear_log_supported(void)
{
	return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}

static void clear_log_create_vm_done(struct kvm_vm *vm)
{
	struct kvm_enable_cap cap = {};
	u64 manual_caps;

	manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
	TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
	manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
			KVM_DIRTY_LOG_INITIALLY_SET);
	cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
	cap.args[0] = manual_caps;
	vm_enable_cap(vm, &cap);
}

static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
					  void *bitmap, uint32_t num_pages)
{
	kvm_vm_get_dirty_log(vm, slot, bitmap);
}

static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
					  void *bitmap, uint32_t num_pages)
{
	kvm_vm_get_dirty_log(vm, slot, bitmap);
	kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
}

static void vcpu_handle_sync_stop(void)
{
	if (atomic_read(&vcpu_sync_stop_requested)) {
		/* It means the main thread is sleeping, waiting for us */
		atomic_set(&vcpu_sync_stop_requested, false);
		sem_post(&sem_vcpu_stop);
		sem_wait_until(&sem_vcpu_cont);
	}
}

static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
		    "vcpu run failed: errno=%d", err);

	TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
		    "Invalid guest sync status: exit_reason=%s",
		    exit_reason_str(run->exit_reason));

	vcpu_handle_sync_stop();
}

static bool dirty_ring_supported(void)
{
	return kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
}

static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
	/*
	 * Switch to dirty ring mode after VM creation but before any
	 * of the vcpu creation.
	 */
	vm_enable_dirty_ring(vm, test_dirty_ring_count *
			     sizeof(struct kvm_dirty_gfn));
}

static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
	return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
}

static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
	gfn->flags = KVM_DIRTY_GFN_F_RESET;
}

static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
				       int slot, void *bitmap,
				       uint32_t num_pages, uint32_t *fetch_index)
{
	struct kvm_dirty_gfn *cur;
	uint32_t count = 0;

	while (true) {
		cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
		if (!dirty_gfn_is_dirtied(cur))
			break;
		TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
			    "%u != %u", cur->slot, slot);
		TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
			    "0x%llx >= 0x%x", cur->offset, num_pages);
		set_bit_le(cur->offset, bitmap);
		dirty_ring_last_page = cur->offset;
		dirty_gfn_set_collected(cur);
		(*fetch_index)++;
		count++;
	}

	return count;
}

static void dirty_ring_wait_vcpu(void)
{
	/* This makes sure that the hardware PML cache is flushed */
	vcpu_kick();
	sem_wait_until(&sem_vcpu_stop);
}

static void dirty_ring_continue_vcpu(void)
{
	pr_info("Notifying vcpu to continue\n");
	sem_post(&sem_vcpu_cont);
}

static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
					   void *bitmap, uint32_t num_pages)
{
	/* We only have one vcpu */
	static uint32_t fetch_index = 0;
	uint32_t count = 0, cleared;
	bool continued_vcpu = false;

	dirty_ring_wait_vcpu();

	if (!dirty_ring_vcpu_ring_full) {
		/*
		 * This is not a ring-full event, it's safe to allow
		 * vcpu to continue
		 */
		dirty_ring_continue_vcpu();
		continued_vcpu = true;
	}

	/* Only have one vcpu */
	count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
				       slot, bitmap, num_pages, &fetch_index);

	cleared = kvm_vm_reset_dirty_ring(vm);

	/* Cleared pages should be the same as collected */
	TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
		    "with collected (%u)", cleared, count);

	if (!continued_vcpu) {
		TEST_ASSERT(dirty_ring_vcpu_ring_full,
			    "Didn't continue vcpu even without ring full");
		dirty_ring_continue_vcpu();
	}

	pr_info("Iteration %ld collected %u pages\n", iteration, count);
}

static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
	struct kvm_run *run = vcpu_state(vm, VCPU_ID);

	/* A ucall-sync or ring-full event is allowed */
	if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
		/* We should allow this to continue */
		;
	} else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
		   (ret == -1 && err == EINTR)) {
		/* Update the flag first before pause */
		WRITE_ONCE(dirty_ring_vcpu_ring_full,
			   run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
		sem_post(&sem_vcpu_stop);
		pr_info("vcpu stops because %s...\n",
			dirty_ring_vcpu_ring_full ?
			"dirty ring is full" : "vcpu is kicked out");
		sem_wait_until(&sem_vcpu_cont);
		pr_info("vcpu continues now.\n");
	} else {
		TEST_ASSERT(false, "Invalid guest sync status: "
			    "exit_reason=%s",
			    exit_reason_str(run->exit_reason));
	}
}

static void dirty_ring_before_vcpu_join(void)
{
	/* Kick another round of vcpu just to make sure it will quit */
	sem_post(&sem_vcpu_cont);
}

struct log_mode {
	const char *name;
	/* Return true if this mode is supported, otherwise false */
	bool (*supported)(void);
	/* Hook when the vm creation is done (before vcpu creation) */
	void (*create_vm_done)(struct kvm_vm *vm);
	/* Hook to collect the dirty pages into the bitmap provided */
	void (*collect_dirty_pages)(struct kvm_vm *vm, int slot,
				    void *bitmap, uint32_t num_pages);
	/* Hook to call after each vcpu run */
	void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
	/* Hook to call before each vcpu join */
	void (*before_vcpu_join)(void);
} log_modes[LOG_MODE_NUM] = {
	{
		.name = "dirty-log",
		.collect_dirty_pages = dirty_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "clear-log",
		.supported = clear_log_supported,
		.create_vm_done = clear_log_create_vm_done,
		.collect_dirty_pages = clear_log_collect_dirty_pages,
		.after_vcpu_run = default_after_vcpu_run,
	},
	{
		.name = "dirty-ring",
		.supported = dirty_ring_supported,
		.create_vm_done = dirty_ring_create_vm_done,
		.collect_dirty_pages = dirty_ring_collect_dirty_pages,
		.before_vcpu_join = dirty_ring_before_vcpu_join,
		.after_vcpu_run = dirty_ring_after_vcpu_run,
	},
};
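/*
 * This bitmap tracks pages that should have their dirty bit set in the
 * _next_ iteration.  For example, if we detect that a page's value
 * changed to the current iteration number while the page's bit is
 * cleared in the latest bitmap, then the system must report that write
 * in the next get-dirty-log call.
 */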
static unsigned long *host_bmap_track;

static void log_modes_dump(void)
{
	int i;

	printf("all");
	for (i = 0; i < LOG_MODE_NUM; i++)
		printf(", %s", log_modes[i].name);
	printf("\n");
}

static bool log_mode_supported(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->supported)
		return mode->supported();

	return true;
}

static void log_mode_create_vm_done(struct kvm_vm *vm)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->create_vm_done)
		mode->create_vm_done(vm);
}

static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
					 void *bitmap, uint32_t num_pages)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	TEST_ASSERT(mode->collect_dirty_pages != NULL,
		    "collect_dirty_pages() is required for any log mode!");
	mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
}

static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->after_vcpu_run)
		mode->after_vcpu_run(vm, ret, err);
}

static void log_mode_before_vcpu_join(void)
{
	struct log_mode *mode = &log_modes[host_log_mode];

	if (mode->before_vcpu_join)
		mode->before_vcpu_join();
}

static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
	uint64_t i;

	for (i = 0; i < size; i++)
		guest_array[i] = random();
}

static void *vcpu_worker(void *data)
{
	int ret, vcpu_fd;
	struct kvm_vm *vm = data;
	uint64_t *guest_array;
	uint64_t pages_count = 0;
	struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
						 + sizeof(sigset_t));
	sigset_t *sigset = (sigset_t *) &sigmask->sigset;

	vcpu_fd = vcpu_get_fd(vm, VCPU_ID);
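	/*
	 * SIG_IPI is unblocked atomically while in KVM_RUN.  It causes the
	 * ioctl to return with -EINTR, but it is still pending and we need
	 * to accept it with the sigwait.
	 */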
	sigmask->len = 8;
	pthread_sigmask(0, NULL, sigset);
	sigdelset(sigset, SIG_IPI);
	vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);

	sigemptyset(sigset);
	sigaddset(sigset, SIG_IPI);

	guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);

	while (!READ_ONCE(host_quit)) {
		/* Refill the random page indices for the guest */
		generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
		pages_count += TEST_PAGES_PER_LOOP;
		/* Let the guest dirty the random pages */
		ret = ioctl(vcpu_fd, KVM_RUN, NULL);
		if (ret == -1 && errno == EINTR) {
			int sig = -1;
			sigwait(sigset, &sig);
			assert(sig == SIG_IPI);
		}
		log_mode_after_vcpu_run(vm, ret, errno);
	}

	pr_info("Dirtied %"PRIu64" pages\n", pages_count);

	return NULL;
}

static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
	uint64_t step = vm_num_host_pages(mode, 1);
	uint64_t page;
	uint64_t *value_ptr;
	uint64_t min_iter = 0;

	for (page = 0; page < host_num_pages; page += step) {
		value_ptr = host_test_mem + page * host_page_size;

		/* If this is a special page that we were tracking */
		if (test_and_clear_bit_le(page, host_bmap_track)) {
			host_track_next_count++;
			TEST_ASSERT(test_bit_le(page, bmap),
				    "Page %"PRIu64" should have its dirty bit "
				    "set in this iteration but it is missing",
				    page);
		}

		if (test_and_clear_bit_le(page, bmap)) {
			bool matched;

			host_dirty_count++;

			/*
			 * If the bit is set, the value written onto
			 * the page should be either the previous
			 * iteration number or the current one.
			 */
			matched = (*value_ptr == iteration ||
				   *value_ptr == iteration - 1);

			if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
				if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
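					/*
					 * Short answer: this case is special
					 * only for dirty ring test where the
					 * page is the last page before a kvm
					 * dirty ring full userspace exit in
					 * iteration N-2.
					 *
					 * Long answer: assuming ring size R,
					 * one possible condition is:
					 *
					 *      main thr       vcpu thr
					 *      --------       --------
					 *    iter=1
					 *                   write 1 to page 0~(R-1)
					 *                   full, vmexit
					 *    collect 0~(R-1)
					 *    kick vcpu
					 *                   write 1 to (R-1)~(2R-2)
					 *                   full, vmexit
					 *    iter=2
					 *    collect (R-1)~(2R-2)
					 *    kick vcpu
					 *                   write 1 to (2R-2)
					 *                   (NOTE!!! "1" cached in cpu reg)
					 *                   write 2 to (2R-1)~(3R-3)
					 *                   full, vmexit
					 *    iter=3
					 *    collect (2R-2)~(3R-3)
					 *    (here if we read value on page
					 *     "2R-2" is 1, while iter=3!!!)
					 *
					 * This however can only happen once
					 * per iteration, so we allow it and
					 * raise min_iter to skip this check
					 * for the rest of the iteration.
					 */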
					min_iter = iteration - 1;
					continue;
				} else if (page == dirty_ring_last_page) {
					/*
					 * Please refer to the comments on
					 * dirty_ring_last_page.
					 */
					continue;
				}
			}

			TEST_ASSERT(matched,
				    "Set page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
		} else {
			host_clear_count++;
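			/*
			 * If the bit is cleared, the value written can
			 * be any value smaller than or equal to the
			 * iteration number.  Note that the value can be
			 * exactly (iteration-1) with this ordering:
			 *
			 * (1) increase loop count to "iteration-1"
			 * (2) write to page P happens (with value
			 *     "iteration-1")
			 * (3) get dirty log for "iteration-1"; we'll
			 *     see that page P bit is set (dirtied),
			 *     and not set the bit in host_bmap_track
			 * (4) increase loop count to "iteration"
			 *     (which is the current iteration)
			 * (5) get dirty log for the current iteration;
			 *     page P is now cleared in the bitmap while
			 *     it still holds the value "iteration-1"
			 */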
			TEST_ASSERT(*value_ptr <= iteration,
				    "Clear page %"PRIu64" value %"PRIu64
				    " incorrect (iteration=%"PRIu64")",
				    page, *value_ptr, iteration);
			if (*value_ptr == iteration) {
				/*
				 * This page is _just_ modified; it
				 * should report its dirtiness in the
				 * next run.
				 */
				set_bit_le(page, host_bmap_track);
			}
		}
	}
}

static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
				uint64_t extra_mem_pages, void *guest_code)
{
	struct kvm_vm *vm;
	uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;

	pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

	vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
	kvm_vm_elf_load(vm, program_invocation_name);
#ifdef __x86_64__
	vm_create_irqchip(vm);
#endif
	log_mode_create_vm_done(vm);
	vm_vcpu_add_default(vm, vcpuid, guest_code);
	return vm;
}

#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K  12

struct test_params {
	unsigned long iterations;
	unsigned long interval;
	uint64_t phys_offset;
};

static void run_test(enum vm_guest_mode mode, void *arg)
{
	struct test_params *p = arg;
	struct kvm_vm *vm;
	unsigned long *bmap;

	if (!log_mode_supported()) {
		print_skip("Log mode '%s' not supported",
			   log_modes[host_log_mode].name);
		return;
	}
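	/*
	 * We reserve page table for 2 times of extra dirty mem which
	 * will definitely cover the original (1G+) test range.  Here
	 * we do the calculation with 4K page size which is the
	 * smallest so the page number will be enough for all archs
	 * (e.g., 64K page size guest will need even less memory for
	 * page tables).
	 */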
	vm = create_vm(mode, VCPU_ID,
		       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
		       guest_code);

	guest_page_size = vm_get_page_size(vm);
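	/*
	 * A little more than 1G of guest page sized pages.  Cover the
	 * case where the size is not aligned to 64 pages.
	 */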
	guest_num_pages = (1ul << (DIRTY_MEM_BITS -
				   vm_get_page_shift(vm))) + 3;
	guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

	host_page_size = getpagesize();
	host_num_pages = vm_num_host_pages(mode, guest_num_pages);

	if (!p->phys_offset) {
		guest_test_phys_mem = (vm_get_max_gfn(vm) -
				       guest_num_pages) * guest_page_size;
		guest_test_phys_mem &= ~(host_page_size - 1);
	} else {
		guest_test_phys_mem = p->phys_offset;
	}

#ifdef __s390x__
	/* Align to 1M (segment size) */
	guest_test_phys_mem &= ~((1 << 20) - 1);
#endif

	pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

	bmap = bitmap_zalloc(host_num_pages);
	host_bmap_track = bitmap_zalloc(host_num_pages);

	/* Add an extra memory slot for testing dirty logging */
	vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
				    guest_test_phys_mem,
				    TEST_MEM_SLOT_INDEX,
				    guest_num_pages,
				    KVM_MEM_LOG_DIRTY_PAGES);

	/* Do mapping for the dirty track memory slot */
	virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

	/* Cache the HVA pointer of the region */
	host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

	ucall_init(vm, NULL);

	/* Export the shared variables to the guest */
	sync_global_to_guest(vm, host_page_size);
	sync_global_to_guest(vm, guest_page_size);
	sync_global_to_guest(vm, guest_test_virt_mem);
	sync_global_to_guest(vm, guest_num_pages);

	/* Start the iterations */
	iteration = 1;
	sync_global_to_guest(vm, iteration);
	host_quit = false;
	host_dirty_count = 0;
	host_clear_count = 0;
	host_track_next_count = 0;

	pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);

	while (iteration < p->iterations) {
		/* Give the vcpu thread some time to dirty some pages */
		usleep(p->interval * 1000);
		log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
					     bmap, host_num_pages);
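		/*
		 * See the vcpu_sync_stop_requested definition for
		 * details on why we need to stop the vcpu while we
		 * verify the dirty data.
		 */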
		atomic_set(&vcpu_sync_stop_requested, true);
		sem_wait_until(&sem_vcpu_stop);
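		/*
		 * NOTE: for dirty ring, it's possible that we didn't stop at
		 * GUEST_SYNC but instead we stopped because ring is full;
		 * that's okay too because ring full means we're only missing
		 * the flush of the last page, and since we handle the last
		 * page specially verification will succeed anyway.
		 */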
		assert(host_log_mode == LOG_MODE_DIRTY_RING ||
		       atomic_read(&vcpu_sync_stop_requested) == false);
		vm_dirty_log_verify(mode, bmap);
		sem_post(&sem_vcpu_cont);

		iteration++;
		sync_global_to_guest(vm, iteration);
	}

	/* Tell the vcpu thread to quit */
	host_quit = true;
	log_mode_before_vcpu_join();
	pthread_join(vcpu_thread, NULL);

	pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
		"track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
		host_track_next_count);

	free(bmap);
	free(host_bmap_track);
	ucall_uninit(vm);
	kvm_vm_free(vm);
}

static void help(char *name)
{
	puts("");
	printf("usage: %s [-h] [-i iterations] [-I interval] "
	       "[-p offset] [-c count] [-m mode] [-M log_mode]\n", name);
	puts("");
	printf(" -c: specify dirty ring size, in number of entries\n");
	printf("     (only useful for dirty-ring test; default: %"PRIu32")\n",
	       TEST_DIRTY_RING_COUNT);
	printf(" -i: specify iteration counts (default: %"PRIu64")\n",
	       TEST_HOST_LOOP_N);
	printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
	       TEST_HOST_LOOP_INTERVAL);
	printf(" -p: specify guest physical test memory offset\n"
	       "     Warning: a low offset can conflict with the loaded test code.\n");
	printf(" -M: specify the host logging mode "
	       "(default: run all log modes).  Supported modes:\n\t");
	log_modes_dump();
	guest_modes_help();
	puts("");
	exit(0);
}

int main(int argc, char *argv[])
{
	struct test_params p = {
		.iterations = TEST_HOST_LOOP_N,
		.interval = TEST_HOST_LOOP_INTERVAL,
	};
	int opt, i;
	sigset_t sigset;

	sem_init(&sem_vcpu_stop, 0, 0);
	sem_init(&sem_vcpu_cont, 0, 0);

	guest_modes_append_default();

	while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
		switch (opt) {
		case 'c':
			test_dirty_ring_count = strtol(optarg, NULL, 10);
			break;
		case 'i':
			p.iterations = strtol(optarg, NULL, 10);
			break;
		case 'I':
			p.interval = strtol(optarg, NULL, 10);
			break;
		case 'p':
			p.phys_offset = strtoull(optarg, NULL, 0);
			break;
		case 'm':
			guest_modes_cmdline(optarg);
			break;
		case 'M':
			if (!strcmp(optarg, "all")) {
				host_log_mode_option = LOG_MODE_ALL;
				break;
			}
			for (i = 0; i < LOG_MODE_NUM; i++) {
				if (!strcmp(optarg, log_modes[i].name)) {
					pr_info("Setting log mode to: '%s'\n",
						optarg);
					host_log_mode_option = i;
					break;
				}
			}
			if (i == LOG_MODE_NUM) {
				printf("Log mode '%s' invalid. Please choose "
				       "from: ", optarg);
				log_modes_dump();
				exit(1);
			}
			break;
		case 'h':
		default:
			help(argv[0]);
			break;
		}
	}

	TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
	TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");

	pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
		p.iterations, p.interval);

	srandom(time(0));

	/* Ensure that vCPU threads start with SIG_IPI blocked */
	sigemptyset(&sigset);
	sigaddset(&sigset, SIG_IPI);
	pthread_sigmask(SIG_BLOCK, &sigset, NULL);

	if (host_log_mode_option == LOG_MODE_ALL) {
		/* Run each log mode separately */
		for (i = 0; i < LOG_MODE_NUM; i++) {
			pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
			host_log_mode = i;
			for_each_guest_mode(run_test, &p);
		}
	} else {
		host_log_mode = host_log_mode_option;
		for_each_guest_mode(run_test, &p);
	}

	return 0;
}