linux/tools/testing/selftests/kvm/lib/perf_test_util.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 */
#include <inttypes.h>

#include "kvm_util.h"
#include "perf_test_util.h"
#include "processor.h"

struct perf_test_args perf_test_args;

/*
 * Guest virtual memory offset of the testing memory slot.
 * Must not conflict with identity mapped test code.
 */
static uint64_t guest_test_virt_mem = DEFAULT_GUEST_TEST_MEM;

struct vcpu_thread {
        /* The id of the vCPU. */
        int vcpu_id;

        /* The pthread backing the vCPU. */
        pthread_t thread;

        /* Set to true once the vCPU thread is up and running. */
        bool running;
};

/* The vCPU threads involved in this test. */
static struct vcpu_thread vcpu_threads[KVM_MAX_VCPUS];

/* The function run by each vCPU thread, as provided by the test. */
static void (*vcpu_thread_fn)(struct perf_test_vcpu_args *);

/* Set to true once all vCPU threads are up and running. */
static bool all_vcpu_threads_running;

/*
 * Continuously iterate over the vCPU's memory region: write the first
 * 8 bytes of 1 out of every wr_fract pages, and read the first 8 bytes
 * of the rest.
 */
static void guest_code(uint32_t vcpu_id)
{
        struct perf_test_args *pta = &perf_test_args;
        struct perf_test_vcpu_args *vcpu_args = &pta->vcpu_args[vcpu_id];
        uint64_t gva;
        uint64_t pages;
        int i;

        /* Make sure vCPU args data structure is not corrupt. */
        GUEST_ASSERT(vcpu_args->vcpu_id == vcpu_id);

        gva = vcpu_args->gva;
        pages = vcpu_args->pages;

        while (true) {
                for (i = 0; i < pages; i++) {
                        uint64_t addr = gva + (i * pta->guest_page_size);

                        if (i % pta->wr_fract == 0)
                                *(uint64_t *)addr = 0x0123456789ABCDEF;
                        else
                                READ_ONCE(*(uint64_t *)addr);
                }

                GUEST_SYNC(1);
        }
}
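
/*
 * Worked example (illustrative, not from the original source): with
 * wr_fract == 4 and a 16-page region, each pass through the loop above
 * writes pages 0, 4, 8 and 12 and reads the other twelve, i.e. 1 in
 * every wr_fract page accesses is a write.
 */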

void perf_test_setup_vcpus(struct kvm_vm *vm, int vcpus,
                           uint64_t vcpu_memory_bytes,
                           bool partition_vcpu_memory_access)
{
        struct perf_test_args *pta = &perf_test_args;
        struct perf_test_vcpu_args *vcpu_args;
        int vcpu_id;

        for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
                vcpu_args = &pta->vcpu_args[vcpu_id];

                vcpu_args->vcpu_id = vcpu_id;
                if (partition_vcpu_memory_access) {
                        vcpu_args->gva = guest_test_virt_mem +
                                         (vcpu_id * vcpu_memory_bytes);
                        vcpu_args->pages = vcpu_memory_bytes /
                                           pta->guest_page_size;
                        vcpu_args->gpa = pta->gpa + (vcpu_id * vcpu_memory_bytes);
                } else {
                        vcpu_args->gva = guest_test_virt_mem;
                        vcpu_args->pages = (vcpus * vcpu_memory_bytes) /
                                           pta->guest_page_size;
                        vcpu_args->gpa = pta->gpa;
                }

                vcpu_args_set(vm, vcpu_id, 1, vcpu_id);

                pr_debug("Added VCPU %d with test mem gpa [%lx, %lx)\n",
                         vcpu_id, vcpu_args->gpa, vcpu_args->gpa +
                         (vcpu_args->pages * pta->guest_page_size));
        }
}
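
/*
 * Worked example (illustrative, not from the original source): with
 * vcpus == 4, vcpu_memory_bytes == 256 MiB and a 4 KiB guest_page_size,
 * partitioned access gives vCPU 2 gva == guest_test_virt_mem + 512 MiB
 * and pages == 65536; without partitioning, all four vCPUs target the
 * same 1 GiB region starting at guest_test_virt_mem.
 */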

struct kvm_vm *perf_test_create_vm(enum vm_guest_mode mode, int vcpus,
                                   uint64_t vcpu_memory_bytes, int slots,
                                   enum vm_mem_backing_src_type backing_src,
                                   bool partition_vcpu_memory_access)
{
        struct perf_test_args *pta = &perf_test_args;
        struct kvm_vm *vm;
        uint64_t guest_num_pages;
        uint64_t backing_src_pagesz = get_backing_src_pagesz(backing_src);
        int i;

        pr_info("Testing guest mode: %s\n", vm_guest_mode_string(mode));

        /* By default vCPUs will write to memory. */
        pta->wr_fract = 1;

        /*
         * Snapshot the non-huge page size.  This is used by the guest code to
         * access/dirty pages at the logging granularity.
         */
        pta->guest_page_size = vm_guest_mode_params[mode].page_size;

        guest_num_pages = vm_adjust_num_guest_pages(mode,
                                (vcpus * vcpu_memory_bytes) / pta->guest_page_size);

        TEST_ASSERT(vcpu_memory_bytes % getpagesize() == 0,
                    "Guest memory size is not host page size aligned.");
        TEST_ASSERT(vcpu_memory_bytes % pta->guest_page_size == 0,
                    "Guest memory size is not guest page size aligned.");
        TEST_ASSERT(guest_num_pages % slots == 0,
                    "Guest memory cannot be evenly divided into %d slots.",
                    slots);

        /*
         * Pass guest_num_pages to populate the page tables for test memory.
         * The memory is also added to memslot 0, but that's a benign side
         * effect as KVM allows aliasing HVAs in memslots.
         */
        vm = vm_create_with_vcpus(mode, vcpus, DEFAULT_GUEST_PHY_PAGES,
                                  guest_num_pages, 0, guest_code, NULL);

        pta->vm = vm;

        /*
         * The test region must fit in the guest physical address space;
         * requesting more test memory than there are guest pages is fatal.
         */
        TEST_ASSERT(guest_num_pages < vm_get_max_gfn(vm),
                    "Requested more guest memory than address space allows.\n"
                    "    guest pages: %" PRIx64 " max gfn: %" PRIx64
                    " vcpus: %d wss: %" PRIx64 "\n",
                    guest_num_pages, vm_get_max_gfn(vm), vcpus,
                    vcpu_memory_bytes);

        pta->gpa = (vm_get_max_gfn(vm) - guest_num_pages) * pta->guest_page_size;
        pta->gpa = align_down(pta->gpa, backing_src_pagesz);
#ifdef __s390x__
        /* Align to 1M (segment size) */
        pta->gpa = align_down(pta->gpa, 1 << 20);
#endif
        pr_info("guest physical test memory offset: 0x%lx\n", pta->gpa);

        /* Add extra memory slots for testing */
        for (i = 0; i < slots; i++) {
                uint64_t region_pages = guest_num_pages / slots;
                vm_paddr_t region_start = pta->gpa + region_pages * pta->guest_page_size * i;

                vm_userspace_mem_region_add(vm, backing_src, region_start,
                                            PERF_TEST_MEM_SLOT_INDEX + i,
                                            region_pages, 0);
        }

        /* Do mapping for the demand paging memory slot */
        virt_map(vm, guest_test_virt_mem, pta->gpa, guest_num_pages);

        perf_test_setup_vcpus(vm, vcpus, vcpu_memory_bytes, partition_vcpu_memory_access);

        ucall_init(vm, NULL);

        /* Export the shared variables to the guest. */
        sync_global_to_guest(vm, perf_test_args);

        return vm;
}
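
/*
 * Worked example (illustrative, not from the original source): asking
 * for 4 vCPUs x 128 MiB with 4 KiB pages and slots == 2 places the
 * 512 MiB region at the top of the guest physical address space,
 * aligned down to the backing source page size, and carves it into two
 * 256 MiB memslots at indices PERF_TEST_MEM_SLOT_INDEX and
 * PERF_TEST_MEM_SLOT_INDEX + 1.
 */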

void perf_test_destroy_vm(struct kvm_vm *vm)
{
        ucall_uninit(vm);
        kvm_vm_free(vm);
}

void perf_test_set_wr_fract(struct kvm_vm *vm, int wr_fract)
{
        perf_test_args.wr_fract = wr_fract;
        sync_global_to_guest(vm, perf_test_args);
}
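
/*
 * Usage sketch (illustrative, not from the original source):
 * perf_test_set_wr_fract(vm, 2) makes guest_code() write one page in
 * every two and read the rest; sync_global_to_guest() is re-run above
 * so the guest observes the new value.
 */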

static void *vcpu_thread_main(void *data)
{
        struct vcpu_thread *vcpu = data;

        WRITE_ONCE(vcpu->running, true);

        /*
         * Wait for all vCPU threads to be up and running before calling the test-
         * provided vCPU thread function. This prevents thread creation (which
         * requires taking the mmap_sem in write mode) from interfering with the
         * guest faulting in its memory.
         */
        while (!READ_ONCE(all_vcpu_threads_running))
                ;

        vcpu_thread_fn(&perf_test_args.vcpu_args[vcpu->vcpu_id]);

        return NULL;
}

void perf_test_start_vcpu_threads(int vcpus, void (*vcpu_fn)(struct perf_test_vcpu_args *))
{
        int vcpu_id;

        vcpu_thread_fn = vcpu_fn;
        WRITE_ONCE(all_vcpu_threads_running, false);

        for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
                struct vcpu_thread *vcpu = &vcpu_threads[vcpu_id];

                vcpu->vcpu_id = vcpu_id;
                WRITE_ONCE(vcpu->running, false);

                pthread_create(&vcpu->thread, NULL, vcpu_thread_main, vcpu);
        }

        for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++) {
                while (!READ_ONCE(vcpu_threads[vcpu_id].running))
                        ;
        }

        WRITE_ONCE(all_vcpu_threads_running, true);
}
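
/*
 * Note (descriptive, not from the original source): the handshake above
 * is two-phase. Each worker publishes vcpu->running, the main thread
 * waits for every worker, and only then sets all_vcpu_threads_running,
 * so no vCPU touches guest memory while pthread_create() may still be
 * holding mmap_sem in write mode.
 */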

void perf_test_join_vcpu_threads(int vcpus)
{
        int vcpu_id;

        for (vcpu_id = 0; vcpu_id < vcpus; vcpu_id++)
                pthread_join(vcpu_threads[vcpu_id].thread, NULL);
}
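
#if 0
/*
 * End-to-end usage sketch (illustrative only, hence compiled out): a
 * minimal caller built on this library, roughly how tests such as
 * dirty_log_perf_test drive it.  The worker, iteration count and sizes
 * below are hypothetical; real tests wrap vcpu_run() with timing,
 * dirty-logging ioctls and ucall handling.
 */
static void example_vcpu_worker(struct perf_test_vcpu_args *vcpu_args)
{
        int i;

        /* Each vcpu_run() returns when guest_code() reaches GUEST_SYNC(1). */
        for (i = 0; i < 2; i++)
                vcpu_run(perf_test_args.vm, vcpu_args->vcpu_id);
}

static void example_run_test(enum vm_guest_mode mode)
{
        int vcpus = 4;
        struct kvm_vm *vm;

        vm = perf_test_create_vm(mode, vcpus, 256ul << 20 /* bytes per vCPU */,
                                 1 /* slots */, VM_MEM_SRC_ANONYMOUS,
                                 true /* partition accesses */);

        /* Write 1 page in every 4; read the other 3. */
        perf_test_set_wr_fract(vm, 4);

        perf_test_start_vcpu_threads(vcpus, example_vcpu_worker);
        perf_test_join_vcpu_threads(vcpus);
        perf_test_destroy_vm(vm);
}
#endif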