linux/tools/testing/selftests/kvm/dirty_log_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * KVM dirty page logging test
 *
 * Copyright (C) 2018, Red Hat, Inc.
 */

#define _GNU_SOURCE /* for program_invocation_name */

#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include <semaphore.h>
#include <sys/types.h>
#include <signal.h>
#include <errno.h>
#include <linux/bitmap.h>
#include <linux/bitops.h>
#include <linux/atomic.h>

#include "kvm_util.h"
#include "test_util.h"
#include "guest_modes.h"
#include "processor.h"

#define VCPU_ID                         1

/* The memory slot index to track dirty pages */
#define TEST_MEM_SLOT_INDEX             1

/* Default guest test virtual memory offset */
#define DEFAULT_GUEST_TEST_MEM          0xc0000000

/* How many pages to dirty for each guest loop */
#define TEST_PAGES_PER_LOOP             1024

/* How many host loops to run (one KVM_GET_DIRTY_LOG for each loop) */
#define TEST_HOST_LOOP_N                32UL

/* Interval for each host loop (ms) */
#define TEST_HOST_LOOP_INTERVAL         10UL

/* Dirty bitmaps are always little endian, so we need to swap on big endian */
#if defined(__s390x__)
# define BITOP_LE_SWIZZLE       ((BITS_PER_LONG-1) & ~0x7)
# define test_bit_le(nr, addr) \
        test_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define set_bit_le(nr, addr) \
        set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define clear_bit_le(nr, addr) \
        clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_set_bit_le(nr, addr) \
        test_and_set_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
# define test_and_clear_bit_le(nr, addr) \
        test_and_clear_bit((nr) ^ BITOP_LE_SWIZZLE, addr)
#else
# define test_bit_le            test_bit
# define set_bit_le             set_bit
# define clear_bit_le           clear_bit
# define test_and_set_bit_le    test_and_set_bit
# define test_and_clear_bit_le  test_and_clear_bit
#endif

#define TEST_DIRTY_RING_COUNT           65536

#define SIG_IPI SIGUSR1

/*
 * Guest/Host shared variables. Ensure addr_gva2hva() and/or
 * sync_global_to/from_guest() are used when accessing from
 * the host. READ/WRITE_ONCE() should also be used with anything
 * that may change.
 */
static uint64_t host_page_size;
static uint64_t guest_page_size;
static uint64_t guest_num_pages;
static uint64_t random_array[TEST_PAGES_PER_LOOP];
static uint64_t iteration;

/*
 * Guest physical memory offset of the testing memory slot.
 * This will be set to the topmost valid physical address minus
 * the test memory size.
 */
static uint64_t guest_test_phys_mem;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;
/*
 * Continuously write to the first 8 bytes of random pages within
 * the testing memory region.
 */
static void guest_code(void)
{
        uint64_t addr;
        int i;

        /*
         * On s390x, all pages of a 1M segment are initially marked as dirty
         * when a page of the segment is written to for the very first time.
         * To compensate for this peculiarity in this test, we need to touch
         * all pages during the first iteration.
         */
        for (i = 0; i < guest_num_pages; i++) {
                addr = guest_test_virt_mem + i * guest_page_size;
                *(uint64_t *)addr = READ_ONCE(iteration);
        }

        while (true) {
                for (i = 0; i < TEST_PAGES_PER_LOOP; i++) {
                        addr = guest_test_virt_mem;
                        addr += (READ_ONCE(random_array[i]) % guest_num_pages)
                                * guest_page_size;
                        addr &= ~(host_page_size - 1);
                        *(uint64_t *)addr = READ_ONCE(iteration);
                }

                /* Tell the host that we need more random numbers */
                GUEST_SYNC(1);
        }
}

/* Host variables */
static bool host_quit;

/* Points to the test VM memory region on which we track dirty logs */
static void *host_test_mem;
static uint64_t host_num_pages;

/* For statistics only */
static uint64_t host_dirty_count;
static uint64_t host_clear_count;
static uint64_t host_track_next_count;

/* Whether dirty ring reset is requested, or finished */
static sem_t sem_vcpu_stop;
static sem_t sem_vcpu_cont;
/*
 * This is only set by the main thread, and only cleared by the vcpu
 * thread.  It is used to request that the vcpu thread stop at the next
 * GUEST_SYNC, since GUEST_SYNC is the only place where we guarantee
 * both "dirty bit" and "dirty data" will match.  E.g., SIG_IPI won't
 * guarantee that if the vcpu is interrupted after setting the dirty bit
 * but before the data is written.
 */
static atomic_t vcpu_sync_stop_requested;
/*
 * This is updated by the vcpu thread to tell the host whether it's a
 * ring-full event.  It should only be read after a sem_wait() of
 * sem_vcpu_stop and before the vcpu continues to run.
 */
static bool dirty_ring_vcpu_ring_full;
/*
 * This is only used for verifying the dirty pages.  The dirty ring has a
 * very tricky case when the ring just got full: kvm will do a userspace
 * exit due to ring full.  When that happens, the very last PFN is set but
 * the data is not actually changed (the guest WRITE is not really applied
 * yet), because we found that the dirty ring is full, refused to continue
 * the vcpu, and recorded the dirty gfn with the old contents.
 *
 * For this specific case, it's safe to skip checking this pfn for this
 * bit, because it's a redundant bit, and when the write happens later the
 * bit will be set again.  We use this variable to always keep track of the
 * latest dirty gfn we've collected, so that if a data mismatch is found
 * later in the verifying process, we let it pass.
 */
static uint64_t dirty_ring_last_page;

enum log_mode_t {
        /* Only use KVM_GET_DIRTY_LOG for logging */
        LOG_MODE_DIRTY_LOG = 0,

        /* Use both KVM_[GET|CLEAR]_DIRTY_LOG for logging */
        LOG_MODE_CLEAR_LOG = 1,

        /* Use dirty ring for logging */
        LOG_MODE_DIRTY_RING = 2,

        LOG_MODE_NUM,

        /* Run all supported modes */
        LOG_MODE_ALL = LOG_MODE_NUM,
};

/* Mode of logging to test.  Default is to run all supported modes */
static enum log_mode_t host_log_mode_option = LOG_MODE_ALL;
/* Logging mode for current run */
static enum log_mode_t host_log_mode;
static pthread_t vcpu_thread;
static uint32_t test_dirty_ring_count = TEST_DIRTY_RING_COUNT;

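/*
 * Kick the vcpu thread with SIG_IPI: an in-flight KVM_RUN returns -EINTR
 * and the signal is then consumed with sigwait() in vcpu_worker().
 */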
static void vcpu_kick(void)
{
        pthread_kill(vcpu_thread, SIG_IPI);
}

/*
 * In our test we do signal tricks, so use a version of sem_wait that
 * is not interrupted by signals.
 */
static void sem_wait_until(sem_t *sem)
{
        int ret;

        do
                ret = sem_wait(sem);
        while (ret == -1 && errno == EINTR);
}

static bool clear_log_supported(void)
{
        return kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
}

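/* Enable KVM's manual dirty log protection (and initially-set, if supported) */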
static void clear_log_create_vm_done(struct kvm_vm *vm)
{
        struct kvm_enable_cap cap = {};
        u64 manual_caps;

        manual_caps = kvm_check_cap(KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2);
        TEST_ASSERT(manual_caps, "MANUAL_CAPS is zero!");
        manual_caps &= (KVM_DIRTY_LOG_MANUAL_PROTECT_ENABLE |
                        KVM_DIRTY_LOG_INITIALLY_SET);
        cap.cap = KVM_CAP_MANUAL_DIRTY_LOG_PROTECT2;
        cap.args[0] = manual_caps;
        vm_enable_cap(vm, &cap);
}

static void dirty_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
                                          void *bitmap, uint32_t num_pages)
{
        kvm_vm_get_dirty_log(vm, slot, bitmap);
}

static void clear_log_collect_dirty_pages(struct kvm_vm *vm, int slot,
                                          void *bitmap, uint32_t num_pages)
{
        kvm_vm_get_dirty_log(vm, slot, bitmap);
        kvm_vm_clear_dirty_log(vm, slot, bitmap, 0, num_pages);
}

/* Should only be called after a GUEST_SYNC */
static void vcpu_handle_sync_stop(void)
{
        if (atomic_read(&vcpu_sync_stop_requested)) {
                /* The main thread is sleeping, waiting for us to stop */
                atomic_set(&vcpu_sync_stop_requested, false);
                sem_post(&sem_vcpu_stop);
                sem_wait_until(&sem_vcpu_cont);
        }
}

static void default_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);

        TEST_ASSERT(ret == 0 || (ret == -1 && err == EINTR),
                    "vcpu run failed: errno=%d", err);

        TEST_ASSERT(get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC,
                    "Invalid guest sync status: exit_reason=%s\n",
                    exit_reason_str(run->exit_reason));

        vcpu_handle_sync_stop();
}

static bool dirty_ring_supported(void)
{
        return kvm_check_cap(KVM_CAP_DIRTY_LOG_RING);
}

static void dirty_ring_create_vm_done(struct kvm_vm *vm)
{
        /*
         * Switch to dirty ring mode after VM creation but before any
         * of the vcpu creation.
         */
        vm_enable_dirty_ring(vm, test_dirty_ring_count *
                             sizeof(struct kvm_dirty_gfn));
}

static inline bool dirty_gfn_is_dirtied(struct kvm_dirty_gfn *gfn)
{
        return gfn->flags == KVM_DIRTY_GFN_F_DIRTY;
}

static inline void dirty_gfn_set_collected(struct kvm_dirty_gfn *gfn)
{
        gfn->flags = KVM_DIRTY_GFN_F_RESET;
}

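/*
 * Harvest dirty GFNs published in the vcpu's dirty ring into the bitmap,
 * marking each harvested entry as collected.  Returns the number of
 * entries collected.
 */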
static uint32_t dirty_ring_collect_one(struct kvm_dirty_gfn *dirty_gfns,
                                       int slot, void *bitmap,
                                       uint32_t num_pages, uint32_t *fetch_index)
{
        struct kvm_dirty_gfn *cur;
        uint32_t count = 0;

        while (true) {
                cur = &dirty_gfns[*fetch_index % test_dirty_ring_count];
                if (!dirty_gfn_is_dirtied(cur))
                        break;
                TEST_ASSERT(cur->slot == slot, "Slot number didn't match: "
                            "%u != %u", cur->slot, slot);
                TEST_ASSERT(cur->offset < num_pages, "Offset overflow: "
                            "0x%llx >= 0x%x", cur->offset, num_pages);
                //pr_info("fetch 0x%x page %llu\n", *fetch_index, cur->offset);
                set_bit_le(cur->offset, bitmap);
                dirty_ring_last_page = cur->offset;
                dirty_gfn_set_collected(cur);
                (*fetch_index)++;
                count++;
        }

        return count;
}

static void dirty_ring_wait_vcpu(void)
{
        /* This makes sure that the hardware PML cache is flushed */
        vcpu_kick();
        sem_wait_until(&sem_vcpu_stop);
}

static void dirty_ring_continue_vcpu(void)
{
        pr_info("Notifying vcpu to continue\n");
        sem_post(&sem_vcpu_cont);
}

static void dirty_ring_collect_dirty_pages(struct kvm_vm *vm, int slot,
                                           void *bitmap, uint32_t num_pages)
{
        /* We only have one vcpu */
        static uint32_t fetch_index = 0;
        uint32_t count = 0, cleared;
        bool continued_vcpu = false;

        dirty_ring_wait_vcpu();

        if (!dirty_ring_vcpu_ring_full) {
                /*
                 * This is not a ring-full event, so it's safe to allow
                 * the vcpu to continue.
                 */
                dirty_ring_continue_vcpu();
                continued_vcpu = true;
        }

        /* Only have one vcpu */
        count = dirty_ring_collect_one(vcpu_map_dirty_ring(vm, VCPU_ID),
                                       slot, bitmap, num_pages, &fetch_index);

        cleared = kvm_vm_reset_dirty_ring(vm);

        /* Cleared pages should be the same as collected */
        TEST_ASSERT(cleared == count, "Reset dirty pages (%u) mismatch "
                    "with collected (%u)", cleared, count);

        if (!continued_vcpu) {
                TEST_ASSERT(dirty_ring_vcpu_ring_full,
                            "Didn't continue vcpu even without ring full");
                dirty_ring_continue_vcpu();
        }

        pr_info("Iteration %ld collected %u pages\n", iteration, count);
}

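/*
 * Dirty-ring after_vcpu_run hook: a GUEST_SYNC exit is let through, while
 * a ring-full exit (or a SIG_IPI kick) parks the vcpu thread on
 * sem_vcpu_cont until the main thread lets it continue.
 */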
static void dirty_ring_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);

        /* A ucall-sync or ring-full event is allowed */
        if (get_ucall(vm, VCPU_ID, NULL) == UCALL_SYNC) {
                /* We should allow this to continue */
                ;
        } else if (run->exit_reason == KVM_EXIT_DIRTY_RING_FULL ||
                   (ret == -1 && err == EINTR)) {
                /* Update the flag first before pause */
                WRITE_ONCE(dirty_ring_vcpu_ring_full,
                           run->exit_reason == KVM_EXIT_DIRTY_RING_FULL);
                sem_post(&sem_vcpu_stop);
                pr_info("vcpu stops because %s...\n",
                        dirty_ring_vcpu_ring_full ?
                        "dirty ring is full" : "vcpu is kicked out");
                sem_wait_until(&sem_vcpu_cont);
                pr_info("vcpu continues now.\n");
        } else {
                TEST_ASSERT(false, "Invalid guest sync status: "
                            "exit_reason=%s\n",
                            exit_reason_str(run->exit_reason));
        }
}

static void dirty_ring_before_vcpu_join(void)
{
        /* Kick another round of vcpu just to make sure it will quit */
        sem_post(&sem_vcpu_cont);
}

struct log_mode {
        const char *name;
        /* Return true if this mode is supported, otherwise false */
        bool (*supported)(void);
        /* Hook when the vm creation is done (before vcpu creation) */
        void (*create_vm_done)(struct kvm_vm *vm);
        /* Hook to collect the dirty pages into the bitmap provided */
        void (*collect_dirty_pages) (struct kvm_vm *vm, int slot,
                                     void *bitmap, uint32_t num_pages);
        /* Hook to call after each vcpu run */
        void (*after_vcpu_run)(struct kvm_vm *vm, int ret, int err);
        void (*before_vcpu_join) (void);
} log_modes[LOG_MODE_NUM] = {
        {
                .name = "dirty-log",
                .collect_dirty_pages = dirty_log_collect_dirty_pages,
                .after_vcpu_run = default_after_vcpu_run,
        },
        {
                .name = "clear-log",
                .supported = clear_log_supported,
                .create_vm_done = clear_log_create_vm_done,
                .collect_dirty_pages = clear_log_collect_dirty_pages,
                .after_vcpu_run = default_after_vcpu_run,
        },
        {
                .name = "dirty-ring",
                .supported = dirty_ring_supported,
                .create_vm_done = dirty_ring_create_vm_done,
                .collect_dirty_pages = dirty_ring_collect_dirty_pages,
                .before_vcpu_join = dirty_ring_before_vcpu_join,
                .after_vcpu_run = dirty_ring_after_vcpu_run,
        },
};

/*
 * We use this bitmap to track pages that should have their dirty bits
 * set in the _next_ iteration.  For example, if we detect that a page's
 * value changed to the current iteration but at the same time the
 * page's bit is cleared in the latest bitmap, then the system must
 * report that write in the next get-dirty-log call.
 */
static unsigned long *host_bmap_track;

static void log_modes_dump(void)
{
        int i;

        printf("all");
        for (i = 0; i < LOG_MODE_NUM; i++)
                printf(", %s", log_modes[i].name);
        printf("\n");
}

static bool log_mode_supported(void)
{
        struct log_mode *mode = &log_modes[host_log_mode];

        if (mode->supported)
                return mode->supported();

        return true;
}

static void log_mode_create_vm_done(struct kvm_vm *vm)
{
        struct log_mode *mode = &log_modes[host_log_mode];

        if (mode->create_vm_done)
                mode->create_vm_done(vm);
}

static void log_mode_collect_dirty_pages(struct kvm_vm *vm, int slot,
                                         void *bitmap, uint32_t num_pages)
{
        struct log_mode *mode = &log_modes[host_log_mode];

        TEST_ASSERT(mode->collect_dirty_pages != NULL,
                    "collect_dirty_pages() is required for any log mode!");
        mode->collect_dirty_pages(vm, slot, bitmap, num_pages);
}

static void log_mode_after_vcpu_run(struct kvm_vm *vm, int ret, int err)
{
        struct log_mode *mode = &log_modes[host_log_mode];

        if (mode->after_vcpu_run)
                mode->after_vcpu_run(vm, ret, err);
}

static void log_mode_before_vcpu_join(void)
{
        struct log_mode *mode = &log_modes[host_log_mode];

        if (mode->before_vcpu_join)
                mode->before_vcpu_join();
}

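/* Refill the array of random numbers the guest uses to pick pages to dirty */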
static void generate_random_array(uint64_t *guest_array, uint64_t size)
{
        uint64_t i;

        for (i = 0; i < size; i++)
                guest_array[i] = random();
}

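/*
 * The vcpu thread: keep running the guest until host_quit is set, feeding
 * it fresh random numbers each loop and letting the per-mode hook handle
 * every KVM_RUN exit (GUEST_SYNC, dirty ring full, or a SIG_IPI kick).
 */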
static void *vcpu_worker(void *data)
{
        int ret, vcpu_fd;
        struct kvm_vm *vm = data;
        uint64_t *guest_array;
        uint64_t pages_count = 0;
        struct kvm_signal_mask *sigmask = alloca(offsetof(struct kvm_signal_mask, sigset)
                                                 + sizeof(sigset_t));
        sigset_t *sigset = (sigset_t *) &sigmask->sigset;

        vcpu_fd = vcpu_get_fd(vm, VCPU_ID);

        /*
         * SIG_IPI is unblocked atomically while in KVM_RUN.  It causes the
         * ioctl to return with -EINTR, but it is still pending and we need
         * to accept it with the sigwait.
         */
        sigmask->len = 8;
        pthread_sigmask(0, NULL, sigset);
        sigdelset(sigset, SIG_IPI);
        vcpu_ioctl(vm, VCPU_ID, KVM_SET_SIGNAL_MASK, sigmask);

        sigemptyset(sigset);
        sigaddset(sigset, SIG_IPI);

        guest_array = addr_gva2hva(vm, (vm_vaddr_t)random_array);

        while (!READ_ONCE(host_quit)) {
                /* Clear any existing kick signals */
                generate_random_array(guest_array, TEST_PAGES_PER_LOOP);
                pages_count += TEST_PAGES_PER_LOOP;
                /* Let the guest dirty the random pages */
                ret = ioctl(vcpu_fd, KVM_RUN, NULL);
                if (ret == -1 && errno == EINTR) {
                        int sig = -1;
                        sigwait(sigset, &sig);
                        assert(sig == SIG_IPI);
                }
                log_mode_after_vcpu_run(vm, ret, errno);
        }

        pr_info("Dirtied %"PRIu64" pages\n", pages_count);

        return NULL;
}

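/*
 * Verify one iteration's dirty bitmap against what the guest wrote: dirty
 * pages must hold the current or previous iteration number (with the
 * dirty-ring special cases explained inline), while clean pages that were
 * written in this iteration are remembered in host_bmap_track and must
 * show up as dirty in the next iteration.
 */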
static void vm_dirty_log_verify(enum vm_guest_mode mode, unsigned long *bmap)
{
        uint64_t step = vm_num_host_pages(mode, 1);
        uint64_t page;
        uint64_t *value_ptr;
        uint64_t min_iter = 0;

        for (page = 0; page < host_num_pages; page += step) {
                value_ptr = host_test_mem + page * host_page_size;

                /* If this is a special page that we were tracking... */
                if (test_and_clear_bit_le(page, host_bmap_track)) {
                        host_track_next_count++;
                        TEST_ASSERT(test_bit_le(page, bmap),
                                    "Page %"PRIu64" should have its dirty bit "
                                    "set in this iteration but it is missing",
                                    page);
                }

                if (test_and_clear_bit_le(page, bmap)) {
                        bool matched;

                        host_dirty_count++;

                        /*
                         * If the bit is set, the value written onto
                         * the corresponding page should be either the
                         * previous iteration number or the current one.
                         */
                        matched = (*value_ptr == iteration ||
                                   *value_ptr == iteration - 1);

                        if (host_log_mode == LOG_MODE_DIRTY_RING && !matched) {
                                if (*value_ptr == iteration - 2 && min_iter <= iteration - 2) {
                                        /*
                                         * Short answer: this case is special
                                         * only for dirty ring test where the
                                         * page is the last page before a kvm
                                         * dirty ring full in iteration N-2.
                                         *
                                         * Long answer: Assuming ring size R,
                                         * one possible condition is:
                                         *
                                         *      main thr       vcpu thr
                                         *      --------       --------
                                         *    iter=1
                                         *                   write 1 to page 0~(R-1)
                                         *                   full, vmexit
                                         *    collect 0~(R-1)
                                         *    kick vcpu
                                         *                   write 1 to (R-1)~(2R-2)
                                         *                   full, vmexit
                                         *    iter=2
                                         *    collect (R-1)~(2R-2)
                                         *    kick vcpu
                                         *                   write 1 to (2R-2)
                                         *                   (NOTE!!! "1" cached in cpu reg)
                                         *                   write 2 to (2R-1)~(3R-3)
                                         *                   full, vmexit
                                         *    iter=3
                                         *    collect (2R-2)~(3R-3)
                                         *    (here if we read the value on page
                                         *     "2R-2" it is 1, while iter=3!!!)
                                         *
                                         * This however can only happen once per iteration.
                                         */
                                        min_iter = iteration - 1;
                                        continue;
                                } else if (page == dirty_ring_last_page) {
                                        /*
                                         * Please refer to comments in
                                         * dirty_ring_last_page.
                                         */
                                        continue;
                                }
                        }

                        TEST_ASSERT(matched,
                                    "Set page %"PRIu64" value %"PRIu64
                                    " incorrect (iteration=%"PRIu64")",
                                    page, *value_ptr, iteration);
                } else {
                        host_clear_count++;
                        /*
                         * If cleared, the value written can be any
                         * value smaller than or equal to the iteration
                         * number.  Note that the value can be exactly
                         * (iteration-1) if that write can happen
                         * like this:
                         *
                         * (1) increase loop count to "iteration-1"
                         * (2) write to page P happens (with value
                         *     "iteration-1")
                         * (3) get dirty log for "iteration-1"; we'll
                         *     see that page P bit is set (dirtied),
                         *     and not set the bit in host_bmap_track
                         * (4) increase loop count to "iteration"
                         *     (which is current iteration)
                         * (5) get dirty log for current iteration,
                         *     we'll see that page P is cleared, with
                         *     value "iteration-1".
                         */
                        TEST_ASSERT(*value_ptr <= iteration,
                                    "Clear page %"PRIu64" value %"PRIu64
                                    " incorrect (iteration=%"PRIu64")",
                                    page, *value_ptr, iteration);
                        if (*value_ptr == iteration) {
                                /*
                                 * This page was _just_ modified; it
                                 * should report its dirtiness in the
                                 * next run
                                 */
                                set_bit_le(page, host_bmap_track);
                        }
                }
        }
}

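/*
 * Create the test VM, load the test binary into it, run the per-mode
 * create_vm_done hook, and add the default vcpu running guest_code.
 */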
static struct kvm_vm *create_vm(enum vm_guest_mode mode, uint32_t vcpuid,
                                uint64_t extra_mem_pages, void *guest_code)
{
        struct kvm_vm *vm;
        uint64_t extra_pg_pages = extra_mem_pages / 512 * 2;

        pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

        vm = vm_create(mode, DEFAULT_GUEST_PHY_PAGES + extra_pg_pages, O_RDWR);
        kvm_vm_elf_load(vm, program_invocation_name);
#ifdef __x86_64__
        vm_create_irqchip(vm);
#endif
        log_mode_create_vm_done(vm);
        vm_vcpu_add_default(vm, vcpuid, guest_code);
        return vm;
}

#define DIRTY_MEM_BITS 30 /* 1G */
#define PAGE_SHIFT_4K  12

struct test_params {
        unsigned long iterations;
        unsigned long interval;
        uint64_t phys_offset;
};

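/*
 * Run the dirty logging test for one guest mode: set up a memory slot with
 * KVM_MEM_LOG_DIRTY_PAGES, start the vcpu thread, and then collect and
 * verify the dirty log every p->interval milliseconds for p->iterations
 * iterations.
 */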
static void run_test(enum vm_guest_mode mode, void *arg)
{
        struct test_params *p = arg;
        struct kvm_vm *vm;
        unsigned long *bmap;

        if (!log_mode_supported()) {
                print_skip("Log mode '%s' not supported",
                           log_modes[host_log_mode].name);
                return;
        }

        /*
         * We reserve page table memory for twice the extra dirty memory,
         * which will definitely cover the original (1G+) test range.  Here
         * we do the calculation with the 4K page size, which is the
         * smallest, so the page count will be enough for all archs
         * (e.g., a 64K page size guest will need even less memory for
         * page tables).
         */
        vm = create_vm(mode, VCPU_ID,
                       2ul << (DIRTY_MEM_BITS - PAGE_SHIFT_4K),
                       guest_code);

        guest_page_size = vm_get_page_size(vm);
        /*
         * A little more than 1G of guest page sized pages.  Cover the
         * case where the size is not aligned to 64 pages.
         */
        guest_num_pages = (1ul << (DIRTY_MEM_BITS -
                                   vm_get_page_shift(vm))) + 3;
        guest_num_pages = vm_adjust_num_guest_pages(mode, guest_num_pages);

        host_page_size = getpagesize();
        host_num_pages = vm_num_host_pages(mode, guest_num_pages);

        if (!p->phys_offset) {
                guest_test_phys_mem = (vm_get_max_gfn(vm) -
                                       guest_num_pages) * guest_page_size;
                guest_test_phys_mem &= ~(host_page_size - 1);
        } else {
                guest_test_phys_mem = p->phys_offset;
        }

#ifdef __s390x__
        /* Align to 1M (segment size) */
        guest_test_phys_mem &= ~((1 << 20) - 1);
#endif

        pr_info("guest physical test memory offset: 0x%lx\n", guest_test_phys_mem);

        bmap = bitmap_zalloc(host_num_pages);
        host_bmap_track = bitmap_zalloc(host_num_pages);

        /* Add an extra memory slot for testing dirty logging */
        vm_userspace_mem_region_add(vm, VM_MEM_SRC_ANONYMOUS,
                                    guest_test_phys_mem,
                                    TEST_MEM_SLOT_INDEX,
                                    guest_num_pages,
                                    KVM_MEM_LOG_DIRTY_PAGES);

        /* Do mapping for the dirty track memory slot */
        virt_map(vm, guest_test_virt_mem, guest_test_phys_mem, guest_num_pages);

        /* Cache the HVA pointer of the region */
        host_test_mem = addr_gpa2hva(vm, (vm_paddr_t)guest_test_phys_mem);

        ucall_init(vm, NULL);

        /* Export the shared variables to the guest */
        sync_global_to_guest(vm, host_page_size);
        sync_global_to_guest(vm, guest_page_size);
        sync_global_to_guest(vm, guest_test_virt_mem);
        sync_global_to_guest(vm, guest_num_pages);

        /* Start the iterations */
        iteration = 1;
        sync_global_to_guest(vm, iteration);
        host_quit = false;
        host_dirty_count = 0;
        host_clear_count = 0;
        host_track_next_count = 0;

        pthread_create(&vcpu_thread, NULL, vcpu_worker, vm);

        while (iteration < p->iterations) {
                /* Give the vcpu thread some time to dirty some pages */
                usleep(p->interval * 1000);
                log_mode_collect_dirty_pages(vm, TEST_MEM_SLOT_INDEX,
                                             bmap, host_num_pages);

                /*
                 * See the vcpu_sync_stop_requested definition for details on
                 * why we need to stop the vcpu while verifying data.
                 */
                atomic_set(&vcpu_sync_stop_requested, true);
                sem_wait_until(&sem_vcpu_stop);
                /*
                 * NOTE: for dirty ring, it's possible that we didn't stop at
                 * GUEST_SYNC but instead we stopped because the ring is full;
                 * that's okay too, because a full ring means we're only
                 * missing the flush of the last page, and since we handle
                 * the last page specially verification will succeed anyway.
                 */
                assert(host_log_mode == LOG_MODE_DIRTY_RING ||
                       atomic_read(&vcpu_sync_stop_requested) == false);
                vm_dirty_log_verify(mode, bmap);
                sem_post(&sem_vcpu_cont);

                iteration++;
                sync_global_to_guest(vm, iteration);
        }

        /* Tell the vcpu thread to quit */
        host_quit = true;
        log_mode_before_vcpu_join();
        pthread_join(vcpu_thread, NULL);

        pr_info("Total bits checked: dirty (%"PRIu64"), clear (%"PRIu64"), "
                "track_next (%"PRIu64")\n", host_dirty_count, host_clear_count,
                host_track_next_count);

        free(bmap);
        free(host_bmap_track);
        ucall_uninit(vm);
        kvm_vm_free(vm);
}

static void help(char *name)
{
        puts("");
        printf("usage: %s [-h] [-i iterations] [-I interval] "
               "[-p offset] [-m mode]\n", name);
        puts("");
        printf(" -c: specify dirty ring size, in number of entries\n");
        printf("     (only useful for dirty-ring test; default: %"PRIu32")\n",
               TEST_DIRTY_RING_COUNT);
        printf(" -i: specify iteration counts (default: %"PRIu64")\n",
               TEST_HOST_LOOP_N);
        printf(" -I: specify interval in ms (default: %"PRIu64" ms)\n",
               TEST_HOST_LOOP_INTERVAL);
        printf(" -p: specify guest physical test memory offset\n"
               "     Warning: a low offset can conflict with the loaded test code.\n");
        printf(" -M: specify the host logging mode "
               "(default: run all log modes).  Supported modes: \n\t");
        log_modes_dump();
        guest_modes_help();
        puts("");
        exit(0);
}

int main(int argc, char *argv[])
{
        struct test_params p = {
                .iterations = TEST_HOST_LOOP_N,
                .interval = TEST_HOST_LOOP_INTERVAL,
        };
        int opt, i;
        sigset_t sigset;

        sem_init(&sem_vcpu_stop, 0, 0);
        sem_init(&sem_vcpu_cont, 0, 0);

        guest_modes_append_default();

        while ((opt = getopt(argc, argv, "c:hi:I:p:m:M:")) != -1) {
                switch (opt) {
                case 'c':
                        test_dirty_ring_count = strtol(optarg, NULL, 10);
                        break;
                case 'i':
                        p.iterations = strtol(optarg, NULL, 10);
                        break;
                case 'I':
                        p.interval = strtol(optarg, NULL, 10);
                        break;
                case 'p':
                        p.phys_offset = strtoull(optarg, NULL, 0);
                        break;
                case 'm':
                        guest_modes_cmdline(optarg);
                        break;
                case 'M':
                        if (!strcmp(optarg, "all")) {
                                host_log_mode_option = LOG_MODE_ALL;
                                break;
                        }
                        for (i = 0; i < LOG_MODE_NUM; i++) {
                                if (!strcmp(optarg, log_modes[i].name)) {
                                        pr_info("Setting log mode to: '%s'\n",
                                                optarg);
                                        host_log_mode_option = i;
                                        break;
                                }
                        }
                        if (i == LOG_MODE_NUM) {
                                printf("Log mode '%s' invalid. Please choose "
                                       "from: ", optarg);
                                log_modes_dump();
                                exit(1);
                        }
                        break;
                case 'h':
                default:
                        help(argv[0]);
                        break;
                }
        }

        TEST_ASSERT(p.iterations > 2, "Iterations must be greater than two");
        TEST_ASSERT(p.interval > 0, "Interval must be greater than zero");

        pr_info("Test iterations: %"PRIu64", interval: %"PRIu64" (ms)\n",
                p.iterations, p.interval);

        srandom(time(0));

        /* Ensure that vCPU threads start with SIG_IPI blocked.  */
        sigemptyset(&sigset);
        sigaddset(&sigset, SIG_IPI);
        pthread_sigmask(SIG_BLOCK, &sigset, NULL);

        if (host_log_mode_option == LOG_MODE_ALL) {
                /* Run each log mode */
                for (i = 0; i < LOG_MODE_NUM; i++) {
                        pr_info("Testing Log Mode '%s'\n", log_modes[i].name);
                        host_log_mode = i;
                        for_each_guest_mode(run_test, &p);
                }
        } else {
                host_log_mode = host_log_mode_option;
                for_each_guest_mode(run_test, &p);
        }

        return 0;
}