linux/tools/testing/selftests/kvm/x86_64/userspace_msr_exit_test.c
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2020, Google LLC.
 *
 * Tests for exiting into userspace on registered MSRs
 */

#define _GNU_SOURCE /* for program_invocation_short_name */
#include <sys/ioctl.h>

#include "test_util.h"
#include "kvm_util.h"
#include "vmx.h"

/* Forced emulation prefix, used to invoke the emulator unconditionally. */
#define KVM_FEP "ud2; .byte 'k', 'v', 'm';"
#define KVM_FEP_LENGTH 5
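/*
 * The prefix is KVM_FEP_LENGTH (5) bytes: ud2 (2 bytes) plus the three
 * "kvm" marker bytes.  fep_available is cleared by the #UD handler below
 * if forced emulation turns out not to be enabled.
 */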
static int fep_available = 1;

#define VCPU_ID       1
#define MSR_NON_EXISTENT 0x474f4f00

static u64 deny_bits = 0;
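/*
 * In a filter range's bitmap, a clear bit denies the MSR and a set bit
 * allows it.  Pointing every range below at the all-zero deny_bits word
 * therefore forces a userspace exit for each listed MSR.
 */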
struct kvm_msr_filter filter_allow = {
        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
        .ranges = {
                {
                        .flags = KVM_MSR_FILTER_READ |
                                 KVM_MSR_FILTER_WRITE,
                        .nmsrs = 1,
                        /* Test an MSR the kernel knows about. */
                        .base = MSR_IA32_XSS,
                        .bitmap = (uint8_t *)&deny_bits,
                }, {
                        .flags = KVM_MSR_FILTER_READ |
                                 KVM_MSR_FILTER_WRITE,
                        .nmsrs = 1,
                        /* Test an MSR the kernel doesn't know about. */
                        .base = MSR_IA32_FLUSH_CMD,
                        .bitmap = (uint8_t *)&deny_bits,
                }, {
                        .flags = KVM_MSR_FILTER_READ |
                                 KVM_MSR_FILTER_WRITE,
                        .nmsrs = 1,
                        /* Test a fabricated MSR that no one knows about. */
                        .base = MSR_NON_EXISTENT,
                        .bitmap = (uint8_t *)&deny_bits,
                },
        },
};

struct kvm_msr_filter filter_fs = {
        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
        .ranges = {
                {
                        .flags = KVM_MSR_FILTER_READ,
                        .nmsrs = 1,
                        .base = MSR_FS_BASE,
                        .bitmap = (uint8_t *)&deny_bits,
                },
        },
};

struct kvm_msr_filter filter_gs = {
        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
        .ranges = {
                {
                        .flags = KVM_MSR_FILTER_READ,
                        .nmsrs = 1,
                        .base = MSR_GS_BASE,
                        .bitmap = (uint8_t *)&deny_bits,
                },
        },
};
static uint64_t msr_non_existent_data;
static int guest_exception_count;
static u32 msr_reads, msr_writes;

static u8 bitmap_00000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_00000000_write[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_40000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
static u8 bitmap_c0000000_read[KVM_MSR_FILTER_MAX_BITMAP_SIZE];
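/*
 * Only bit 0 is set: MSR 0xdeadbeef itself passes the filter, so accesses
 * take the "unknown MSR" exit path rather than the filter path.
 */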
static u8 bitmap_deadbeef[1] = { 0x1 };

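/*
 * Clear @msr's bit in @bitmap so that accesses to it exit to userspace
 * while the rest of the range stays allowed.
 */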
static void deny_msr(uint8_t *bitmap, u32 msr)
{
        u32 idx = msr & (KVM_MSR_FILTER_MAX_BITMAP_SIZE - 1);

        bitmap[idx / 8] &= ~(1 << (idx % 8));
}

static void prepare_bitmaps(void)
{
        memset(bitmap_00000000, 0xff, sizeof(bitmap_00000000));
        memset(bitmap_00000000_write, 0xff, sizeof(bitmap_00000000_write));
        memset(bitmap_40000000, 0xff, sizeof(bitmap_40000000));
        memset(bitmap_c0000000, 0xff, sizeof(bitmap_c0000000));
        memset(bitmap_c0000000_read, 0xff, sizeof(bitmap_c0000000_read));

        deny_msr(bitmap_00000000_write, MSR_IA32_POWER_CTL);
        deny_msr(bitmap_c0000000_read, MSR_SYSCALL_MASK);
        deny_msr(bitmap_c0000000_read, MSR_GS_BASE);
}

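/*
 * Default-deny filter: anything outside the ranges below is denied, and
 * within the ranges everything is allowed except the MSRs cleared by
 * prepare_bitmaps() (POWER_CTL writes, SYSCALL_MASK/GS_BASE reads).
 */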
struct kvm_msr_filter filter_deny = {
        .flags = KVM_MSR_FILTER_DEFAULT_DENY,
        .ranges = {
                {
                        .flags = KVM_MSR_FILTER_READ,
                        .base = 0x00000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_00000000,
                }, {
                        .flags = KVM_MSR_FILTER_WRITE,
                        .base = 0x00000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_00000000_write,
                }, {
                        .flags = KVM_MSR_FILTER_READ | KVM_MSR_FILTER_WRITE,
                        .base = 0x40000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_40000000,
                }, {
                        .flags = KVM_MSR_FILTER_READ,
                        .base = 0xc0000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_c0000000_read,
                }, {
                        .flags = KVM_MSR_FILTER_WRITE,
                        .base = 0xc0000000,
                        .nmsrs = KVM_MSR_FILTER_MAX_BITMAP_SIZE * BITS_PER_BYTE,
                        .bitmap = bitmap_c0000000,
                }, {
                        .flags = KVM_MSR_FILTER_WRITE | KVM_MSR_FILTER_READ,
                        .base = 0xdeadbeef,
                        .nmsrs = 1,
                        .bitmap = bitmap_deadbeef,
                },
        },
};

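/*
 * An empty default-allow filter; installing it effectively removes MSR
 * filtering.
 */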
struct kvm_msr_filter no_filter_deny = {
        .flags = KVM_MSR_FILTER_DEFAULT_ALLOW,
};

/*
 * Note: Force test_rdmsr() to not be inlined to prevent the labels,
 * rdmsr_start and rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_rdmsr(uint32_t msr)
{
        uint32_t a, d;

        guest_exception_count = 0;

        __asm__ __volatile__("rdmsr_start: rdmsr; rdmsr_end:" :
                        "=a"(a), "=d"(d) : "c"(msr) : "memory");

        return a | ((uint64_t) d << 32);
}

/*
 * Note: Force test_wrmsr() to not be inlined to prevent the labels,
 * wrmsr_start and wrmsr_end, from being defined multiple times.
 */
static noinline void test_wrmsr(uint32_t msr, uint64_t value)
{
        uint32_t a = value;
        uint32_t d = value >> 32;

        guest_exception_count = 0;

        __asm__ __volatile__("wrmsr_start: wrmsr; wrmsr_end:" ::
                        "a"(a), "d"(d), "c"(msr) : "memory");
}

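/*
 * The labels defined in the asm blobs above are exported so the #GP
 * handlers can compare the faulting RIP against them and skip past the
 * faulting instruction.
 */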
extern char rdmsr_start, rdmsr_end;
extern char wrmsr_start, wrmsr_end;

/*
 * Note: Force test_em_rdmsr() to not be inlined to prevent the labels,
 * em_rdmsr_start and em_rdmsr_end, from being defined multiple times.
 */
static noinline uint64_t test_em_rdmsr(uint32_t msr)
{
        uint32_t a, d;

        guest_exception_count = 0;

        __asm__ __volatile__(KVM_FEP "em_rdmsr_start: rdmsr; em_rdmsr_end:" :
                        "=a"(a), "=d"(d) : "c"(msr) : "memory");

        return a | ((uint64_t) d << 32);
}

/*
 * Note: Force test_em_wrmsr() to not be inlined to prevent the labels,
 * em_wrmsr_start and em_wrmsr_end, from being defined multiple times.
 */
static noinline void test_em_wrmsr(uint32_t msr, uint64_t value)
{
        uint32_t a = value;
        uint32_t d = value >> 32;

        guest_exception_count = 0;

        __asm__ __volatile__(KVM_FEP "em_wrmsr_start: wrmsr; em_wrmsr_end:" ::
                        "a"(a), "d"(d), "c"(msr) : "memory");
}

extern char em_rdmsr_start, em_rdmsr_end;
extern char em_wrmsr_start, em_wrmsr_end;

static void guest_code_filter_allow(void)
{
        uint64_t data;

        /*
         * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_XSS.
         *
         * A GP is thrown if anything other than 0 is written to
         * MSR_IA32_XSS.
         */
        data = test_rdmsr(MSR_IA32_XSS);
        GUEST_ASSERT(data == 0);
        GUEST_ASSERT(guest_exception_count == 0);

        test_wrmsr(MSR_IA32_XSS, 0);
        GUEST_ASSERT(guest_exception_count == 0);

        test_wrmsr(MSR_IA32_XSS, 1);
        GUEST_ASSERT(guest_exception_count == 1);

        /*
         * Test userspace intercepting rdmsr / wrmsr for MSR_IA32_FLUSH_CMD.
         *
         * A GP is thrown if MSR_IA32_FLUSH_CMD is read
         * from or if a value other than 1 is written to it.
         */
        test_rdmsr(MSR_IA32_FLUSH_CMD);
        GUEST_ASSERT(guest_exception_count == 1);

        test_wrmsr(MSR_IA32_FLUSH_CMD, 0);
        GUEST_ASSERT(guest_exception_count == 1);

        test_wrmsr(MSR_IA32_FLUSH_CMD, 1);
        GUEST_ASSERT(guest_exception_count == 0);

        /*
         * Test userspace intercepting rdmsr / wrmsr for MSR_NON_EXISTENT.
         *
         * Test that a fabricated MSR can pass through the kernel
         * and be handled in userspace.
         */
        test_wrmsr(MSR_NON_EXISTENT, 2);
        GUEST_ASSERT(guest_exception_count == 0);

        data = test_rdmsr(MSR_NON_EXISTENT);
        GUEST_ASSERT(data == 2);
        GUEST_ASSERT(guest_exception_count == 0);

        /*
         * Test to see if the instruction emulator is available (i.e. the
         * module parameter 'kvm.force_emulation_prefix=1' is set).  This
         * instruction will #UD if it isn't available.
         */
        __asm__ __volatile__(KVM_FEP "nop");

        if (fep_available) {
                /* Let userspace know we aren't done. */
                GUEST_SYNC(0);

                /*
                 * Now run the same tests with the instruction emulator.
                 */
                data = test_em_rdmsr(MSR_IA32_XSS);
                GUEST_ASSERT(data == 0);
                GUEST_ASSERT(guest_exception_count == 0);
                test_em_wrmsr(MSR_IA32_XSS, 0);
                GUEST_ASSERT(guest_exception_count == 0);
                test_em_wrmsr(MSR_IA32_XSS, 1);
                GUEST_ASSERT(guest_exception_count == 1);

                test_em_rdmsr(MSR_IA32_FLUSH_CMD);
                GUEST_ASSERT(guest_exception_count == 1);
                test_em_wrmsr(MSR_IA32_FLUSH_CMD, 0);
                GUEST_ASSERT(guest_exception_count == 1);
                test_em_wrmsr(MSR_IA32_FLUSH_CMD, 1);
                GUEST_ASSERT(guest_exception_count == 0);

                test_em_wrmsr(MSR_NON_EXISTENT, 2);
                GUEST_ASSERT(guest_exception_count == 0);
                data = test_em_rdmsr(MSR_NON_EXISTENT);
                GUEST_ASSERT(data == 2);
                GUEST_ASSERT(guest_exception_count == 0);
        }

        GUEST_DONE();
}

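/*
 * Issue the same set of MSR accesses twice: with @trapped true the deny
 * filter is installed and the marked accesses bounce to userspace; with
 * @trapped false the filter has been dropped and they stay in the kernel.
 */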
static void guest_msr_calls(bool trapped)
{
        /* This goes into the in-kernel emulation */
        wrmsr(MSR_SYSCALL_MASK, 0);

        if (trapped) {
                /* This goes into user space emulation */
                GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) == MSR_SYSCALL_MASK);
                GUEST_ASSERT(rdmsr(MSR_GS_BASE) == MSR_GS_BASE);
        } else {
                GUEST_ASSERT(rdmsr(MSR_SYSCALL_MASK) != MSR_SYSCALL_MASK);
                GUEST_ASSERT(rdmsr(MSR_GS_BASE) != MSR_GS_BASE);
        }

        /* If trapped == true, this goes into user space emulation */
        wrmsr(MSR_IA32_POWER_CTL, 0x1234);

        /* This goes into the in-kernel emulation */
        rdmsr(MSR_IA32_POWER_CTL);

        /* Invalid MSR, should always be handled by user space exit */
        GUEST_ASSERT(rdmsr(0xdeadbeef) == 0xdeadbeef);
        wrmsr(0xdeadbeef, 0x1234);
}

static void guest_code_filter_deny(void)
{
        guest_msr_calls(true);

        /*
         * Disable MSR filtering, so that the kernel
         * handles everything in the next round.
         */
        GUEST_SYNC(0);

        guest_msr_calls(false);

        GUEST_DONE();
}

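/*
 * Userspace answers filtered reads by echoing the MSR index back as the
 * data, so a read that returns its own index proves the access took the
 * userspace path.
 */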
static void guest_code_permission_bitmap(void)
{
        uint64_t data;

        data = test_rdmsr(MSR_FS_BASE);
        GUEST_ASSERT(data == MSR_FS_BASE);
        data = test_rdmsr(MSR_GS_BASE);
        GUEST_ASSERT(data != MSR_GS_BASE);

        /* Let userspace know to switch the filter */
        GUEST_SYNC(0);

        data = test_rdmsr(MSR_FS_BASE);
        GUEST_ASSERT(data != MSR_FS_BASE);
        data = test_rdmsr(MSR_GS_BASE);
        GUEST_ASSERT(data == MSR_GS_BASE);

        GUEST_DONE();
}

static void __guest_gp_handler(struct ex_regs *regs,
                               char *r_start, char *r_end,
                               char *w_start, char *w_end)
{
        if (regs->rip == (uintptr_t)r_start) {
                regs->rip = (uintptr_t)r_end;
                regs->rax = 0;
                regs->rdx = 0;
        } else if (regs->rip == (uintptr_t)w_start) {
                regs->rip = (uintptr_t)w_end;
        } else {
                GUEST_ASSERT(!"RIP is at an unknown location!");
        }

        ++guest_exception_count;
}

static void guest_gp_handler(struct ex_regs *regs)
{
        __guest_gp_handler(regs, &rdmsr_start, &rdmsr_end,
                           &wrmsr_start, &wrmsr_end);
}

static void guest_fep_gp_handler(struct ex_regs *regs)
{
        __guest_gp_handler(regs, &em_rdmsr_start, &em_rdmsr_end,
                           &em_wrmsr_start, &em_wrmsr_end);
}

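/*
 * The ud2 in KVM_FEP raises #UD when forced emulation is not enabled;
 * note that and skip over the 5-byte prefix so the guest can continue.
 */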
static void guest_ud_handler(struct ex_regs *regs)
{
        fep_available = 0;
        regs->rip += KVM_FEP_LENGTH;
}

static void run_guest(struct kvm_vm *vm)
{
        int rc;

        rc = _vcpu_run(vm, VCPU_ID);
        TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);
}

static void check_for_guest_assert(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct ucall uc;

        if (run->exit_reason == KVM_EXIT_IO &&
            get_ucall(vm, VCPU_ID, &uc) == UCALL_ABORT) {
                TEST_FAIL("%s at %s:%ld", (const char *)uc.args[0],
                          __FILE__, uc.args[1]);
        }
}

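/*
 * Complete a userspace RDMSR exit: either supply the data the guest will
 * see or set run->msr.error, which KVM turns into a #GP in the guest.
 */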
static void process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);

        check_for_guest_assert(vm);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_RDMSR,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason,
                    exit_reason_str(run->exit_reason));
        TEST_ASSERT(run->msr.index == msr_index,
                    "Unexpected msr (0x%04x), expected 0x%04x",
                    run->msr.index, msr_index);

        switch (run->msr.index) {
        case MSR_IA32_XSS:
                run->msr.data = 0;
                break;
        case MSR_IA32_FLUSH_CMD:
                run->msr.error = 1;
                break;
        case MSR_NON_EXISTENT:
                run->msr.data = msr_non_existent_data;
                break;
        case MSR_FS_BASE:
                run->msr.data = MSR_FS_BASE;
                break;
        case MSR_GS_BASE:
                run->msr.data = MSR_GS_BASE;
                break;
        default:
                TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
        }
}

static void process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);

        check_for_guest_assert(vm);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_X86_WRMSR,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason,
                    exit_reason_str(run->exit_reason));
        TEST_ASSERT(run->msr.index == msr_index,
                    "Unexpected msr (0x%04x), expected 0x%04x",
                    run->msr.index, msr_index);

        switch (run->msr.index) {
        case MSR_IA32_XSS:
                if (run->msr.data != 0)
                        run->msr.error = 1;
                break;
        case MSR_IA32_FLUSH_CMD:
                if (run->msr.data != 1)
                        run->msr.error = 1;
                break;
        case MSR_NON_EXISTENT:
                msr_non_existent_data = run->msr.data;
                break;
        default:
                TEST_ASSERT(false, "Unexpected MSR: 0x%04x", run->msr.index);
        }
}

static void process_ucall_done(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct ucall uc;

        check_for_guest_assert(vm);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason,
                    exit_reason_str(run->exit_reason));

        TEST_ASSERT(get_ucall(vm, VCPU_ID, &uc) == UCALL_DONE,
                    "Unexpected ucall command: %lu, expected UCALL_DONE (%d)",
                    uc.cmd, UCALL_DONE);
}

static uint64_t process_ucall(struct kvm_vm *vm)
{
        struct kvm_run *run = vcpu_state(vm, VCPU_ID);
        struct ucall uc = {};

        check_for_guest_assert(vm);

        TEST_ASSERT(run->exit_reason == KVM_EXIT_IO,
                    "Unexpected exit reason: %u (%s)",
                    run->exit_reason,
                    exit_reason_str(run->exit_reason));

        switch (get_ucall(vm, VCPU_ID, &uc)) {
        case UCALL_SYNC:
                break;
        case UCALL_ABORT:
                check_for_guest_assert(vm);
                break;
        case UCALL_DONE:
                process_ucall_done(vm);
                break;
        default:
                TEST_ASSERT(false, "Unexpected ucall");
        }

        return uc.cmd;
}

static void run_guest_then_process_rdmsr(struct kvm_vm *vm, uint32_t msr_index)
{
        run_guest(vm);
        process_rdmsr(vm, msr_index);
}

static void run_guest_then_process_wrmsr(struct kvm_vm *vm, uint32_t msr_index)
{
        run_guest(vm);
        process_wrmsr(vm, msr_index);
}

static uint64_t run_guest_then_process_ucall(struct kvm_vm *vm)
{
        run_guest(vm);
        return process_ucall(vm);
}

static void run_guest_then_process_ucall_done(struct kvm_vm *vm)
{
        run_guest(vm);
        process_ucall_done(vm);
}

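/*
 * Exercise filter_allow: every filtered access exits to userspace in the
 * order the guest issues it, first via direct rdmsr/wrmsr and then, if
 * forced emulation is available, via the emulator.
 */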
static void test_msr_filter_allow(void)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_X86_USER_SPACE_MSR,
                .args[0] = KVM_MSR_EXIT_REASON_FILTER,
        };
        struct kvm_vm *vm;
        int rc;

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code_filter_allow);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

        rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
        TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR not available");
        vm_enable_cap(vm, &cap);

        rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
        TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER not available");

        vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_allow);

        vm_init_descriptor_tables(vm);
        vcpu_init_descriptor_tables(vm, VCPU_ID);

        vm_handle_exception(vm, GP_VECTOR, guest_gp_handler);

        /* Process guest code userspace exits. */
        run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
        run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
        run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);

        run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
        run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
        run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);

        run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
        run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);

        vm_handle_exception(vm, UD_VECTOR, guest_ud_handler);
        run_guest(vm);
        vm_handle_exception(vm, UD_VECTOR, NULL);

        if (process_ucall(vm) != UCALL_DONE) {
                vm_handle_exception(vm, GP_VECTOR, guest_fep_gp_handler);

                /* Process emulated rdmsr and wrmsr instructions. */
                run_guest_then_process_rdmsr(vm, MSR_IA32_XSS);
                run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);
                run_guest_then_process_wrmsr(vm, MSR_IA32_XSS);

                run_guest_then_process_rdmsr(vm, MSR_IA32_FLUSH_CMD);
                run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);
                run_guest_then_process_wrmsr(vm, MSR_IA32_FLUSH_CMD);

                run_guest_then_process_wrmsr(vm, MSR_NON_EXISTENT);
                run_guest_then_process_rdmsr(vm, MSR_NON_EXISTENT);

                /* Confirm the guest completed without issues. */
                run_guest_then_process_ucall_done(vm);
        } else {
                printf("Skipping the emulated instruction tests; set the module parameter 'kvm.force_emulation_prefix=1' to run them.\n");
        }

        kvm_vm_free(vm);
}

static int handle_ucall(struct kvm_vm *vm)
{
        struct ucall uc;

        switch (get_ucall(vm, VCPU_ID, &uc)) {
        case UCALL_ABORT:
                TEST_FAIL("Guest assertion not met");
                break;
        case UCALL_SYNC:
                vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &no_filter_deny);
                break;
        case UCALL_DONE:
                return 1;
        default:
                TEST_FAIL("Unknown ucall %lu", uc.cmd);
        }

        return 0;
}

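/*
 * Echo the MSR index back as the read value so the guest can verify that
 * the access really was handled in userspace.
 */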
static void handle_rdmsr(struct kvm_run *run)
{
        run->msr.data = run->msr.index;
        msr_reads++;

        if (run->msr.index == MSR_SYSCALL_MASK ||
            run->msr.index == MSR_GS_BASE) {
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
                            "MSR read trap w/o access fault");
        }

        if (run->msr.index == 0xdeadbeef) {
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
                            "MSR deadbeef read trap w/o inval fault");
        }
}

static void handle_wrmsr(struct kvm_run *run)
{
        /* The written value is ignored; just count and sanity check it. */
        msr_writes++;

        if (run->msr.index == MSR_IA32_POWER_CTL) {
                TEST_ASSERT(run->msr.data == 0x1234,
                            "MSR data for MSR_IA32_POWER_CTL incorrect");
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_FILTER,
                            "MSR_IA32_POWER_CTL trap w/o access fault");
        }

        if (run->msr.index == 0xdeadbeef) {
                TEST_ASSERT(run->msr.data == 0x1234,
                            "MSR data for deadbeef incorrect");
                TEST_ASSERT(run->msr.reason == KVM_MSR_EXIT_REASON_UNKNOWN,
                            "deadbeef trap w/o inval fault");
        }
}

static void test_msr_filter_deny(void)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_X86_USER_SPACE_MSR,
                .args[0] = KVM_MSR_EXIT_REASON_INVAL |
                           KVM_MSR_EXIT_REASON_UNKNOWN |
                           KVM_MSR_EXIT_REASON_FILTER,
        };
        struct kvm_vm *vm;
        struct kvm_run *run;
        int rc;

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code_filter_deny);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());
        run = vcpu_state(vm, VCPU_ID);

        rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
        TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR not available");
        vm_enable_cap(vm, &cap);

        rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
        TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER not available");

        prepare_bitmaps();
        vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_deny);

        while (1) {
                rc = _vcpu_run(vm, VCPU_ID);

                TEST_ASSERT(rc == 0, "vcpu_run failed: %d\n", rc);

                switch (run->exit_reason) {
                case KVM_EXIT_X86_RDMSR:
                        handle_rdmsr(run);
                        break;
                case KVM_EXIT_X86_WRMSR:
                        handle_wrmsr(run);
                        break;
                case KVM_EXIT_IO:
                        if (handle_ucall(vm))
                                goto done;
                        break;
                }
        }

done:
        TEST_ASSERT(msr_reads == 4, "Expected 4 rdmsr in user space, handled %u",
                    msr_reads);
        TEST_ASSERT(msr_writes == 3, "Expected 3 wrmsr in user space, handled %u",
                    msr_writes);

        kvm_vm_free(vm);
}

static void test_msr_permission_bitmap(void)
{
        struct kvm_enable_cap cap = {
                .cap = KVM_CAP_X86_USER_SPACE_MSR,
                .args[0] = KVM_MSR_EXIT_REASON_FILTER,
        };
        struct kvm_vm *vm;
        int rc;

        /* Create VM */
        vm = vm_create_default(VCPU_ID, 0, guest_code_permission_bitmap);
        vcpu_set_cpuid(vm, VCPU_ID, kvm_get_supported_cpuid());

        rc = kvm_check_cap(KVM_CAP_X86_USER_SPACE_MSR);
        TEST_ASSERT(rc, "KVM_CAP_X86_USER_SPACE_MSR not available");
        vm_enable_cap(vm, &cap);

        rc = kvm_check_cap(KVM_CAP_X86_MSR_FILTER);
        TEST_ASSERT(rc, "KVM_CAP_X86_MSR_FILTER not available");

        vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_fs);
        run_guest_then_process_rdmsr(vm, MSR_FS_BASE);
        TEST_ASSERT(run_guest_then_process_ucall(vm) == UCALL_SYNC,
                    "Expected ucall state to be UCALL_SYNC.");
        vm_ioctl(vm, KVM_X86_SET_MSR_FILTER, &filter_gs);
        run_guest_then_process_rdmsr(vm, MSR_GS_BASE);
        run_guest_then_process_ucall_done(vm);

        kvm_vm_free(vm);
}

int main(int argc, char *argv[])
{
        /* Tell stdout not to buffer its content */
        setbuf(stdout, NULL);

        test_msr_filter_allow();

        test_msr_filter_deny();

        test_msr_permission_bitmap();

        return 0;
}